diff options
author | Thierry Carrez <thierry@openstack.org> | 2011-07-26 11:46:25 +0200 |
---|---|---|
committer | Thierry Carrez <thierry@openstack.org> | 2011-07-26 11:46:25 +0200 |
commit | 5df221e970d8b060423034fa627735c5c24fce5d (patch) | |
tree | 5be2f5dfaeaee8dbc268f3c2f8cb26d56bd4b78d | |
parent | 85522bba82a4139a89915bba99865a50fd9b8f58 (diff) | |
parent | 986da4b2989bbd56db29117a0e8ad6a92643180c (diff) | |
download | nova-5df221e970d8b060423034fa627735c5c24fce5d.tar.gz nova-5df221e970d8b060423034fa627735c5c24fce5d.tar.xz nova-5df221e970d8b060423034fa627735c5c24fce5d.zip |
Merge diablo-3 development from trunk (rev1322)
196 files changed, 28340 insertions, 4728 deletions
diff --git a/.bzrignore b/.bzrignore index 14d8028f7..91277d100 100644 --- a/.bzrignore +++ b/.bzrignore @@ -13,3 +13,7 @@ nova/vcsversion.py clean.sqlite run_tests.log tests.sqlite +nova/tests/instance-* +tags +.coverage +covhtml @@ -14,6 +14,7 @@ <code@term.ie> <github@anarkystic.com> <code@term.ie> <termie@preciousroy.local> <corywright@gmail.com> <cory.wright@rackspace.com> +<dan@nicira.com> <danwent@dan-xs3-cs> <devin.carlen@gmail.com> <devcamcar@illian.local> <ewan.mellor@citrix.com> <emellor@silver> <itoumsn@nttdata.co.jp> <itoumsn@shayol> @@ -50,4 +51,5 @@ <ilyaalekseyev@acm.org> <ialekseev@griddynamics.com> <ilyaalekseyev@acm.org> <ilya@oscloud.ru> <reldan@oscloud.ru> <enugaev@griddynamics.com> -<kshileev@gmail.com> <kshileev@griddynamics.com>
\ No newline at end of file +<kshileev@gmail.com> <kshileev@griddynamics.com> +<nsokolov@griddynamics.com> <nsokolov@griddynamics.net> @@ -1,4 +1,7 @@ +Adam Gandelman <adamg@canonical.com> +Adam Johnson <adjohn@gmail.com> Alex Meade <alex.meade@rackspace.com> +Alexander Sakhnov <asakhnov@mirantis.com> Andrey Brindeyev <abrindeyev@griddynamics.com> Andy Smith <code@term.ie> Andy Southgate <andy.southgate@citrix.com> @@ -6,6 +9,7 @@ Anne Gentle <anne@openstack.org> Anthony Young <sleepsonthefloor@gmail.com> Antony Messerli <ant@openstack.org> Armando Migliaccio <Armando.Migliaccio@eu.citrix.com> +Arvind Somya <asomya@cisco.com> Bilal Akhtar <bilalakhtar@ubuntu.com> Brian Lamar <brian.lamar@rackspace.com> Brian Schott <bschott@isi.edu> @@ -17,9 +21,11 @@ Christian Berendt <berendt@b1-systems.de> Chuck Short <zulcss@ubuntu.com> Cory Wright <corywright@gmail.com> Dan Prince <dan.prince@rackspace.com> +Dan Wendlandt <dan@nicira.com> Dave Walker <DaveWalker@ubuntu.com> David Pravec <David.Pravec@danix.org> Dean Troyer <dtroyer@gmail.com> +Devendra Modium <dmodium@isi.edu> Devin Carlen <devin.carlen@gmail.com> Ed Leafe <ed@leafe.com> Eldar Nugaev <reldan@oscloud.ru> @@ -43,6 +49,7 @@ John Dewey <john@dewey.ws> John Tran <jtran@attinteractive.com> Jonathan Bryce <jbryce@jbryce.com> Jordan Rinke <jordan@openstack.org> +Joseph Suh <jsuh@isi.edu> Josh Durgin <joshd@hq.newdream.net> Josh Kearney <josh@jk0.org> Josh Kleinpeter <josh@kleinpeter.org> @@ -62,12 +69,14 @@ Masanori Itoh <itoumsn@nttdata.co.jp> Matt Dietz <matt.dietz@rackspace.com> Michael Gundlach <michael.gundlach@rackspace.com> Mike Scherbakov <mihgen@gmail.com> +Mohammed Naser <mnaser@vexxhost.com> Monsyne Dragon <mdragon@rackspace.com> Monty Taylor <mordred@inaugust.com> MORITA Kazutaka <morita.kazutaka@gmail.com> Muneyuki Noguchi <noguchimn@nttdata.co.jp> Nachi Ueno <ueno.nachi@lab.ntt.co.jp> Naveed Massjouni <naveedm9@gmail.com> +Nikolay Sokolov <nsokolov@griddynamics.com> Nirmal Ranganathan 
<nirmal.ranganathan@rackspace.com> Paul Voccio <paul@openstack.org> Renuka Apte <renuka.apte@citrix.com> @@ -77,10 +86,13 @@ Rick Harris <rconradharris@gmail.com> Rob Kost <kost@isi.edu> Ryan Lane <rlane@wikimedia.org> Ryan Lucio <rlucio@internap.com> +Ryu Ishimoto <ryu@midokura.jp> Salvatore Orlando <salvatore.orlando@eu.citrix.com> Sandy Walsh <sandy.walsh@rackspace.com> Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com> +Scott Moser <smoser@ubuntu.com> Soren Hansen <soren.hansen@rackspace.com> +Stephanie Reese <reese.sm@gmail.com> Thierry Carrez <thierry@openstack.org> Todd Willey <todd@ansolabs.com> Trey Morris <trey.morris@rackspace.com> diff --git a/MANIFEST.in b/MANIFEST.in index 4e145de75..421cd806a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -23,6 +23,7 @@ include nova/compute/interfaces.template include nova/console/xvp.conf.template include nova/db/sqlalchemy/migrate_repo/migrate.cfg include nova/db/sqlalchemy/migrate_repo/README +include nova/db/sqlalchemy/migrate_repo/versions/*.sql include nova/virt/interfaces.template include nova/virt/libvirt*.xml.template include nova/virt/cpuinfo.xml.template diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit new file mode 100755 index 000000000..a06c6b1b3 --- /dev/null +++ b/bin/instance-usage-audit @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Cron script to generate usage notifications for instances neither created + nor destroyed in a given time period. + + Together with the notifications generated by compute on instance + create/delete/resize, over that ime period, this allows an external + system consuming usage notification feeds to calculate instance usage + for each tenant. + + Time periods are specified like so: + <number>[mdy] + + 1m = previous month. If the script is run April 1, it will generate usages + for March 1 thry March 31. + 3m = 3 previous months. + 90d = previous 90 days. + 1y = previous year. If run on Jan 1, it generates usages for + Jan 1 thru Dec 31 of the previous year. +""" + +import datetime +import gettext +import os +import sys +import time + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +gettext.install('nova', unicode=1) + + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils + +from nova.notifier import api as notifier_api + +FLAGS = flags.FLAGS +flags.DEFINE_string('instance_usage_audit_period', '1m', + 'time period to generate instance usages for.') + + +def time_period(period): + today = datetime.date.today() + unit = period[-1] + if unit not in 'mdy': + raise ValueError('Time period must be m, d, or y') + n = int(period[:-1]) + if unit == 'm': + year = today.year - (n // 12) + n = n % 12 + if n >= today.month: + year -= 1 + month = 12 + (today.month - n) + else: + month = today.month - n + begin = datetime.datetime(day=1, month=month, year=year) + end = datetime.datetime(day=1, month=today.month, year=today.year) + + elif unit == 'y': + 
begin = datetime.datetime(day=1, month=1, year=today.year - n) + end = datetime.datetime(day=1, month=1, year=today.year) + + elif unit == 'd': + b = today - datetime.timedelta(days=n) + begin = datetime.datetime(day=b.day, month=b.month, year=b.year) + end = datetime.datetime(day=today.day, + month=today.month, + year=today.year) + + return (begin, end) + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + begin, end = time_period(FLAGS.instance_usage_audit_period) + print "Creating usages for %s until %s" % (str(begin), str(end)) + instances = db.instance_get_active_by_window(context.get_admin_context(), + begin, + end) + print "%s instances" % len(instances) + for instance_ref in instances: + usage_info = utils.usage_from_instance(instance_ref, + audit_period_begining=str(begin), + audit_period_ending=str(end)) + notifier_api.notify('compute.%s' % FLAGS.host, + 'compute.instance.exists', + notifier_api.INFO, + usage_info) diff --git a/bin/nova-api b/bin/nova-api index fff67251f..fe8e83366 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -24,8 +24,10 @@ Starts both the EC2 and OpenStack APIs in separate processes. 
""" import os +import signal import sys + possible_topdir = os.path.normpath(os.path.join(os.path.abspath( sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): @@ -34,17 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): import nova.service import nova.utils +from nova import flags + + +FLAGS = flags.FLAGS + def main(): """Launch EC2 and OSAPI services.""" nova.utils.Bootstrapper.bootstrap_binary(sys.argv) - ec2 = nova.service.WSGIService("ec2") - osapi = nova.service.WSGIService("osapi") - launcher = nova.service.Launcher() - launcher.launch_service(ec2) - launcher.launch_service(osapi) + + for api in FLAGS.enabled_apis: + service = nova.service.WSGIService(api) + launcher.launch_service(service) + + signal.signal(signal.SIGTERM, lambda *_: launcher.stop()) try: launcher.wait() diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 5926b97de..325642d52 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -59,14 +59,12 @@ def add_lease(mac, ip_address, _hostname, _interface): LOG.debug(_("leasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.lease_fixed_ip(context.get_admin_context(), - mac, ip_address) else: rpc.cast(context.get_admin_context(), "%s.%s" % (FLAGS.network_topic, FLAGS.host), {"method": "lease_fixed_ip", - "args": {"mac": mac, - "address": ip_address}}) + "args": {"address": ip_address}}) def old_lease(mac, ip_address, hostname, interface): @@ -81,21 +79,19 @@ def del_lease(mac, ip_address, _hostname, _interface): LOG.debug(_("releasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.release_fixed_ip(context.get_admin_context(), - mac, ip_address) else: rpc.cast(context.get_admin_context(), "%s.%s" % (FLAGS.network_topic, FLAGS.host), {"method": "release_fixed_ip", - "args": {"mac": mac, - "address": ip_address}}) + "args": {"address": ip_address}}) def init_leases(interface): 
"""Get the list of hosts for an interface.""" ctxt = context.get_admin_context() network_ref = db.network_get_by_bridge(ctxt, interface) - return linux_net.get_dhcp_leases(ctxt, network_ref['id']) + return linux_net.get_dhcp_leases(ctxt, network_ref) def main(): diff --git a/bin/nova-manage b/bin/nova-manage index 02f20347d..b63bd326f 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -61,6 +61,7 @@ import os import sys import time +from optparse import OptionParser # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... @@ -103,6 +104,14 @@ flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpXMLFlag()) +# Decorators for actions +def args(*args, **kwargs): + def _decorator(func): + func.__dict__.setdefault('options', []).insert(0, (args, kwargs)) + return func + return _decorator + + def param2id(object_id): """Helper function to convert various id types to internal id. args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10' @@ -120,10 +129,11 @@ class VpnCommands(object): self.manager = manager.AuthManager() self.pipe = pipelib.CloudPipe() + @args('--project', dest="project", metavar='<Project name>', + help='Project name') def list(self, project=None): - """Print a listing of the VPN data for one or all projects. 
+ """Print a listing of the VPN data for one or all projects.""" - args: [project=all]""" print "%-12s\t" % 'project', print "%-20s\t" % 'ip:port', print "%-20s\t" % 'private_ip', @@ -165,24 +175,36 @@ class VpnCommands(object): self.pipe.launch_vpn_instance(p.id) time.sleep(10) + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') def run(self, project_id): """Start the VPN for a given project.""" self.pipe.launch_vpn_instance(project_id) + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') + @args('--ip', dest="ip", metavar='<IP Address>', help='IP Address') + @args('--port', dest="port", metavar='<Port>', help='Port') def change(self, project_id, ip, port): """Change the ip and port for a vpn. - args: project, ip, port""" + this will update all networks associated with a project + not sure if that's the desired behavior or not, patches accepted + + """ + # TODO(tr3buchet): perhaps this shouldn't update all networks + # associated with a project in the future project = self.manager.get_project(project_id) if not project: print 'No project %s' % (project_id) return - admin = context.get_admin_context() - network_ref = db.project_get_network(admin, project_id) - db.network_update(admin, - network_ref['id'], - {'vpn_public_address': ip, - 'vpn_public_port': int(port)}) + admin_context = context.get_admin_context() + networks = db.project_get_networks(admin_context, project_id) + for network in networks: + db.network_update(admin_context, + network['id'], + {'vpn_public_address': ip, + 'vpn_public_port': int(port)}) class ShellCommands(object): @@ -204,10 +226,10 @@ class ShellCommands(object): Falls back to Python shell if unavailable""" self.run('python') + @args('--shell', dest="shell", metavar='<bpython|ipython|python >', + help='Python shell') def run(self, shell=None): - """Runs a Python interactive interpreter. 
- - args: [shell=bpython]""" + """Runs a Python interactive interpreter.""" if not shell: shell = 'bpython' @@ -241,6 +263,7 @@ class ShellCommands(object): readline.parse_and_bind("tab:complete") code.interact() + @args('--path', dest='path', metavar='<path>', help='Script path') def script(self, path): """Runs the script from the specifed path with flags set properly. arguments: path""" @@ -253,10 +276,13 @@ class RoleCommands(object): def __init__(self): self.manager = manager.AuthManager() + @args('--user', dest="user", metavar='<user name>', help='User name') + @args('--role', dest="role", metavar='<user role>', help='User role') + @args('--project', dest="project", metavar='<Project name>', + help='Project name') def add(self, user, role, project=None): """adds role to user - if project is specified, adds project specific role - arguments: user, role [project]""" + if project is specified, adds project specific role""" if project: projobj = self.manager.get_project(project) if not projobj.has_member(user): @@ -264,17 +290,23 @@ class RoleCommands(object): return self.manager.add_role(user, role, project) + @args('--user', dest="user", metavar='<user name>', help='User name') + @args('--role', dest="role", metavar='<user role>', help='User role') + @args('--project', dest="project", metavar='<Project name>', + help='Project name') def has(self, user, role, project=None): """checks to see if user has role if project is specified, returns True if user has - the global role and the project role - arguments: user, role [project]""" + the global role and the project role""" print self.manager.has_role(user, role, project) + @args('--user', dest="user", metavar='<user name>', help='User name') + @args('--role', dest="role", metavar='<user role>', help='User role') + @args('--project', dest="project", metavar='<Project name>', + help='Project name') def remove(self, user, role, project=None): """removes role from user - if project is specified, removes project 
specific role - arguments: user, role [project]""" + if project is specified, removes project specific role""" self.manager.remove_role(user, role, project) @@ -298,32 +330,37 @@ class UserCommands(object): def __init__(self): self.manager = manager.AuthManager() + @args('--name', dest="name", metavar='<admin name>', help='Admin name') + @args('--access', dest="access", metavar='<access>', help='Access') + @args('--secret', dest="secret", metavar='<secret>', help='Secret') def admin(self, name, access=None, secret=None): - """creates a new admin and prints exports - arguments: name [access] [secret]""" + """creates a new admin and prints exports""" try: user = self.manager.create_user(name, access, secret, True) except exception.DBError, e: _db_error(e) self._print_export(user) + @args('--name', dest="name", metavar='<name>', help='User name') + @args('--access', dest="access", metavar='<access>', help='Access') + @args('--secret', dest="secret", metavar='<secret>', help='Secret') def create(self, name, access=None, secret=None): - """creates a new user and prints exports - arguments: name [access] [secret]""" + """creates a new user and prints exports""" try: user = self.manager.create_user(name, access, secret, False) except exception.DBError, e: _db_error(e) self._print_export(user) + @args('--name', dest="name", metavar='<name>', help='User name') def delete(self, name): """deletes an existing user arguments: name""" self.manager.delete_user(name) + @args('--name', dest="name", metavar='<admin name>', help='User name') def exports(self, name): - """prints access and secrets for user in export format - arguments: name""" + """prints access and secrets for user in export format""" user = self.manager.get_user(name) if user: self._print_export(user) @@ -331,11 +368,17 @@ class UserCommands(object): print "User %s doesn't exist" % name def list(self): - """lists all users - arguments: <none>""" + """lists all users""" for user in self.manager.get_users(): print 
user.name + @args('--name', dest="name", metavar='<name>', help='User name') + @args('--access', dest="access_key", metavar='<access>', + help='Access key') + @args('--secret', dest="secret_key", metavar='<secret>', + help='Secret key') + @args('--is_admin', dest='is_admin', metavar="<'T'|'F'>", + help='Is admin?') def modify(self, name, access_key, secret_key, is_admin): """update a users keys & admin flag arguments: accesskey secretkey admin @@ -349,9 +392,11 @@ class UserCommands(object): is_admin = False self.manager.modify_user(name, access_key, secret_key, is_admin) + @args('--name', dest="user_id", metavar='<name>', help='User name') + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') def revoke(self, user_id, project_id=None): - """revoke certs for a user - arguments: user_id [project_id]""" + """revoke certs for a user""" if project_id: crypto.revoke_certs_by_user_and_project(user_id, project_id) else: @@ -364,62 +409,85 @@ class ProjectCommands(object): def __init__(self): self.manager = manager.AuthManager() + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') + @args('--user', dest="user_id", metavar='<name>', help='User name') def add(self, project_id, user_id): - """Adds user to project - arguments: project_id user_id""" + """Adds user to project""" try: self.manager.add_to_project(user_id, project_id) except exception.UserNotFound as ex: print ex raise + @args('--project', dest="name", metavar='<Project name>', + help='Project name') + @args('--user', dest="project_manager", metavar='<user>', + help='Project manager') + @args('--desc', dest="description", metavar='<description>', + help='Description') def create(self, name, project_manager, description=None): - """Creates a new project - arguments: name project_manager [description]""" + """Creates a new project""" try: self.manager.create_project(name, project_manager, description) except exception.UserNotFound as ex: print ex 
raise + @args('--project', dest="name", metavar='<Project name>', + help='Project name') + @args('--user', dest="project_manager", metavar='<user>', + help='Project manager') + @args('--desc', dest="description", metavar='<description>', + help='Description') def modify(self, name, project_manager, description=None): - """Modifies a project - arguments: name project_manager [description]""" + """Modifies a project""" try: self.manager.modify_project(name, project_manager, description) except exception.UserNotFound as ex: print ex raise + @args('--project', dest="name", metavar='<Project name>', + help='Project name') def delete(self, name): - """Deletes an existing project - arguments: name""" + """Deletes an existing project""" try: self.manager.delete_project(name) except exception.ProjectNotFound as ex: print ex raise + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') + @args('--user', dest="user_id", metavar='<name>', help='User name') + @args('--file', dest="filename", metavar='<filename>', + help='File name(Default: novarc)') def environment(self, project_id, user_id, filename='novarc'): - """Exports environment variables to an sourcable file - arguments: project_id user_id [filename='novarc]""" + """Exports environment variables to an sourcable file""" try: rc = self.manager.get_environment_rc(user_id, project_id) except (exception.UserNotFound, exception.ProjectNotFound) as ex: print ex raise - with open(filename, 'w') as f: - f.write(rc) + if filename == "-": + sys.stdout.write(rc) + else: + with open(filename, 'w') as f: + f.write(rc) + @args('--user', dest="username", metavar='<username>', help='User name') def list(self, username=None): - """Lists all projects - arguments: [username]""" + """Lists all projects""" for project in self.manager.get_projects(username): print project.name + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') + @args('--key', dest="key", metavar='<key>', 
help='Key') + @args('--value', dest="value", metavar='<value>', help='Value') def quota(self, project_id, key=None, value=None): - """Set or display quotas for project - arguments: project_id [key] [value]""" + """Set or display quotas for project""" ctxt = context.get_admin_context() if key: if value.lower() == 'unlimited': @@ -434,44 +502,55 @@ class ProjectCommands(object): value = 'unlimited' print '%s: %s' % (key, value) + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') + @args('--user', dest="user_id", metavar='<name>', help='User name') def remove(self, project_id, user_id): - """Removes user from project - arguments: project_id user_id""" + """Removes user from project""" try: self.manager.remove_from_project(user_id, project_id) except (exception.UserNotFound, exception.ProjectNotFound) as ex: print ex raise + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') def scrub(self, project_id): - """Deletes data associated with project - arguments: project_id""" - ctxt = context.get_admin_context() - network_ref = db.project_get_network(ctxt, project_id) - db.network_disassociate(ctxt, network_ref['id']) - groups = db.security_group_get_by_project(ctxt, project_id) + """Deletes data associated with project""" + admin_context = context.get_admin_context() + networks = db.project_get_networks(admin_context, project_id) + for network in networks: + db.network_disassociate(admin_context, network['id']) + groups = db.security_group_get_by_project(admin_context, project_id) for group in groups: - db.security_group_destroy(ctxt, group['id']) + db.security_group_destroy(admin_context, group['id']) + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') + @args('--user', dest="user_id", metavar='<name>', help='User name') + @args('--file', dest="filename", metavar='<filename>', + help='File name(Default: nova.zip)') def zipfile(self, project_id, user_id, 
filename='nova.zip'): - """Exports credentials for project to a zip file - arguments: project_id user_id [filename='nova.zip]""" + """Exports credentials for project to a zip file""" try: zip_file = self.manager.get_credentials(user_id, project_id) - with open(filename, 'w') as f: - f.write(zip_file) + if filename == "-": + sys.stdout.write(zip_file) + else: + with open(filename, 'w') as f: + f.write(zip_file) except (exception.UserNotFound, exception.ProjectNotFound) as ex: print ex raise except db.api.NoMoreNetworks: print _('No more networks available. If this is a new ' 'installation, you need\nto call something like this:\n\n' - ' nova-manage network create 10.0.0.0/8 10 64\n\n') + ' nova-manage network create pvt 10.0.0.0/8 10 64\n\n') except exception.ProcessExecutionError, e: print e - print _("The above error may show that the certificate db has not " - "been created.\nPlease create a database by running a " - "nova-api server on this host.") + print _("The above error may show that the certificate db has " + "not been created.\nPlease create a database by running " + "a nova-api server on this host.") AccountCommands = ProjectCommands @@ -479,15 +558,16 @@ AccountCommands = ProjectCommands class FixedIpCommands(object): """Class for managing fixed ip.""" + @args('--host', dest="host", metavar='<host>', help='Host') def list(self, host=None): - """Lists all fixed ips (optionally by host) arguments: [host]""" + """Lists all fixed ips (optionally by host)""" ctxt = context.get_admin_context() try: if host is None: fixed_ips = db.fixed_ip_get_all(ctxt) else: - fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host) + fixed_ips = db.fixed_ip_get_all_by_instance_host(ctxt, host) except exception.NotFound as ex: print "error: %s" % ex sys.exit(2) @@ -505,7 +585,7 @@ class FixedIpCommands(object): instance = fixed_ip['instance'] hostname = instance['hostname'] host = instance['host'] - mac_address = instance['mac_address'] + mac_address = 
fixed_ip['virtual_interface']['address'] print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % ( fixed_ip['network']['cidr'], fixed_ip['address'], @@ -515,24 +595,24 @@ class FixedIpCommands(object): class FloatingIpCommands(object): """Class for managing floating ip.""" - def create(self, host, range): - """Creates floating ips for host by range - arguments: host ip_range""" + @args('--ip_range', dest="range", metavar='<range>', help='IP range') + def create(self, range): + """Creates floating ips for zone by range""" for address in netaddr.IPNetwork(range): db.floating_ip_create(context.get_admin_context(), - {'address': str(address), - 'host': host}) + {'address': str(address)}) + @args('--ip_range', dest="ip_range", metavar='<range>', help='IP range') def delete(self, ip_range): - """Deletes floating ips by range - arguments: range""" + """Deletes floating ips by range""" for address in netaddr.IPNetwork(ip_range): db.floating_ip_destroy(context.get_admin_context(), str(address)) + @args('--host', dest="host", metavar='<host>', help='Host') def list(self, host=None): """Lists all floating ips (optionally by host) - arguments: [host]""" + Note: if host is given, only active floating IPs are returned""" ctxt = context.get_admin_context() if host is None: floating_ips = db.floating_ip_get_all(ctxt) @@ -550,10 +630,36 @@ class FloatingIpCommands(object): class NetworkCommands(object): """Class for managing networks.""" - def create(self, fixed_range=None, num_networks=None, network_size=None, - vlan_start=None, vpn_start=None, fixed_range_v6=None, - gateway_v6=None, label='public'): + @args('--label', dest="label", metavar='<label>', + help='Label(ex: public)') + @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>', + help='Network') + @args('--num_networks', dest="num_networks", metavar='<number>', + help='How many networks create') + @args('--network_size', dest="network_size", metavar='<number>', + help='How many hosts in network') + @args('--vlan', 
dest="vlan_start", metavar='<vlan id>', help='vlan id') + @args('--vpn', dest="vpn_start", help='vpn start') + @args('--fixed_range_v6', dest="fixed_range_v6", help='fixed ipv6 range') + @args('--gateway_v6', dest="gateway_v6", help='ipv6 gateway') + @args('--flat_network_bridge', dest="flat_network_bridge", + metavar='<flat network bridge>', help='Flat_network_bridge') + @args('--bridge_interface', dest="bridge_interface", + metavar='<bridge interface>', help='Bridge_interface') + @args('--multi_host', dest="multi_host", metavar="<'T'|'F'>", + help='Multi host') + @args('--dns1', dest="dns1", metavar="<DNS Address>", help='First DNS') + @args('--dns2', dest="dns2", metavar="<DNS Address>", help='Second DNS') + def create(self, label=None, fixed_range=None, num_networks=None, + network_size=None, multi_host=None, vlan_start=None, + vpn_start=None, fixed_range_v6=None, gateway_v6=None, + flat_network_bridge=None, bridge_interface=None, + dns1=None, dns2=None): """Creates fixed ips for host by range""" + if not label: + msg = _('a label (ex: public) is required to create networks.') + print msg + raise TypeError(msg) if not fixed_range: msg = _('Fixed range in the form of 10.0.0.0/8 is ' 'required to create networks.') @@ -563,41 +669,67 @@ class NetworkCommands(object): num_networks = FLAGS.num_networks if not network_size: network_size = FLAGS.network_size + if not multi_host: + multi_host = FLAGS.multi_host + else: + multi_host = multi_host == 'T' if not vlan_start: vlan_start = FLAGS.vlan_start if not vpn_start: vpn_start = FLAGS.vpn_start if not fixed_range_v6: fixed_range_v6 = FLAGS.fixed_range_v6 + if not flat_network_bridge: + flat_network_bridge = FLAGS.flat_network_bridge + if not bridge_interface: + bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface if not gateway_v6: gateway_v6 = FLAGS.gateway_v6 + if not dns1 and FLAGS.flat_network_dns: + dns1 = FLAGS.flat_network_dns net_manager = utils.import_object(FLAGS.network_manager) + try: 
net_manager.create_networks(context.get_admin_context(), + label=label, cidr=fixed_range, + multi_host=multi_host, num_networks=int(num_networks), network_size=int(network_size), vlan_start=int(vlan_start), vpn_start=int(vpn_start), cidr_v6=fixed_range_v6, gateway_v6=gateway_v6, - label=label) + bridge=flat_network_bridge, + bridge_interface=bridge_interface, + dns1=dns1, + dns2=dns2) except ValueError, e: print e raise e def list(self): """List all created networks""" - print "%-18s\t%-15s\t%-15s\t%-15s" % (_('network'), - _('netmask'), - _('start address'), - 'DNS') + print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % ( + _('network'), + _('netmask'), + _('start address'), + _('DNS1'), + _('DNS2'), + _('VlanID'), + 'project') for network in db.network_get_all(context.get_admin_context()): - print "%-18s\t%-15s\t%-15s\t%-15s" % (network.cidr, - network.netmask, - network.dhcp_start, - network.dns) - + print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % ( + network.cidr, + network.netmask, + network.dhcp_start, + network.dns1, + network.dns2, + network.vlan, + network.project_id) + + @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>', + help='Network to delete') def delete(self, fixed_range): """Deletes a network""" network = db.network_get_by_cidr(context.get_admin_context(), \ @@ -611,13 +743,11 @@ class NetworkCommands(object): class VmCommands(object): """Class for mangaging VM instances.""" + @args('--host', dest="host", metavar='<host>', help='Host') def list(self, host=None): - """Show a list of all instances + """Show a list of all instances""" - :param host: show all instance on specified host. - :param instance: show specificed instance. 
- """ - print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \ + print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \ " %-10s %-10s %-10s %-5s" % ( _('instance'), _('node'), @@ -639,14 +769,14 @@ class VmCommands(object): context.get_admin_context(), host) for instance in instances: - print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \ + print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \ " %-10s %-10s %-10s %-5d" % ( instance['hostname'], instance['host'], - instance['instance_type'], + instance['instance_type'].name, instance['state_description'], instance['launched_at'], - instance['image_id'], + instance['image_ref'], instance['kernel_id'], instance['ramdisk_id'], instance['project_id'], @@ -654,13 +784,11 @@ class VmCommands(object): instance['availability_zone'], instance['launch_index']) + @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID') + @args('--dest', dest='dest', metavar='<Destanation>', + help='destanation node') def live_migration(self, ec2_id, dest): - """Migrates a running instance to a new machine. - - :param ec2_id: instance id which comes from euca-describe-instance. - :param dest: destination host name. - - """ + """Migrates a running instance to a new machine.""" ctxt = context.get_admin_context() instance_id = ec2utils.ec2_id_to_id(ec2_id) @@ -690,9 +818,13 @@ class VmCommands(object): class ServiceCommands(object): """Enable and disable running services""" + @args('--host', dest='host', metavar='<host>', help='Host') + @args('--service', dest='service', metavar='<service>', + help='Nova service') def list(self, host=None, service=None): - """Show a list of all running services. Filter by host & service name. - args: [host] [service]""" + """ + Show a list of all running services. Filter by host & service name. 
+ """ ctxt = context.get_admin_context() now = utils.utcnow() services = db.service_get_all(ctxt) @@ -711,9 +843,11 @@ class ServiceCommands(object): active, art, svc['updated_at']) + @args('--host', dest='host', metavar='<host>', help='Host') + @args('--service', dest='service', metavar='<service>', + help='Nova service') def enable(self, host, service): - """Enable scheduling for a service - args: host service""" + """Enable scheduling for a service""" ctxt = context.get_admin_context() svc = db.service_get_by_args(ctxt, host, service) if not svc: @@ -721,9 +855,11 @@ class ServiceCommands(object): return db.service_update(ctxt, svc['id'], {'disabled': False}) + @args('--host', dest='host', metavar='<host>', help='Host') + @args('--service', dest='service', metavar='<service>', + help='Nova service') def disable(self, host, service): - """Disable scheduling for a service - args: host service""" + """Disable scheduling for a service""" ctxt = context.get_admin_context() svc = db.service_get_by_args(ctxt, host, service) if not svc: @@ -731,12 +867,9 @@ class ServiceCommands(object): return db.service_update(ctxt, svc['id'], {'disabled': True}) + @args('--host', dest='host', metavar='<host>', help='Host') def describe_resource(self, host): - """Describes cpu/memory/hdd info for host. - - :param host: hostname. - - """ + """Describes cpu/memory/hdd info for host.""" result = rpc.call(context.get_admin_context(), FLAGS.scheduler_topic, @@ -764,12 +897,9 @@ class ServiceCommands(object): val['memory_mb'], val['local_gb']) + @args('--host', dest='host', metavar='<host>', help='Host') def update_resource(self, host): - """Updates available vcpu/memory/disk info for host. - - :param host: hostname. 
- - """ + """Updates available vcpu/memory/disk info for host.""" ctxt = context.get_admin_context() service_refs = db.service_get_all_by_host(ctxt, host) @@ -785,12 +915,36 @@ class ServiceCommands(object): {"method": "update_available_resource"}) +class HostCommands(object): + """List hosts""" + + def list(self, zone=None): + """Show a list of all physical hosts. Filter by zone. + args: [zone]""" + print "%-25s\t%-15s" % (_('host'), + _('zone')) + ctxt = context.get_admin_context() + now = utils.utcnow() + services = db.service_get_all(ctxt) + if zone: + services = [s for s in services if s['availability_zone'] == zone] + hosts = [] + for srv in services: + if not [h for h in hosts if h['host'] == srv['host']]: + hosts.append(srv) + + for h in hosts: + print "%-25s\t%-15s" % (h['host'], h['availability_zone']) + + class DbCommands(object): """Class for managing the database.""" def __init__(self): pass + @args('--version', dest='version', metavar='<version>', + help='Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" return migration.db_sync(version) @@ -810,14 +964,18 @@ class VersionCommands(object): print _("%s (%s)") %\ (version.version_string(), version.version_string_with_vcs()) + def __call__(self): + self.list() + class VolumeCommands(object): """Methods for dealing with a cloud in an odd state""" + @args('--volume', dest='volume_id', metavar='<volume id>', + help='Volume ID') def delete(self, volume_id): """Delete a volume, bypassing the check that it - must be available. 
- args: volume_id_id""" + must be available.""" ctxt = context.get_admin_context() volume = db.volume_get(ctxt, param2id(volume_id)) host = volume['host'] @@ -838,11 +996,12 @@ class VolumeCommands(object): {"method": "delete_volume", "args": {"volume_id": volume['id']}}) + @args('--volume', dest='volume_id', metavar='<volume id>', + help='Volume ID') def reattach(self, volume_id): """Re-attach a volume that has previously been attached to an instance. Typically called after a compute host - has been rebooted. - args: volume_id_id""" + has been rebooted.""" ctxt = context.get_admin_context() volume = db.volume_get(ctxt, param2id(volume_id)) if not volume['instance_id']: @@ -869,16 +1028,27 @@ class InstanceTypeCommands(object): val["flavorid"], val["swap"], val["rxtx_quota"], val["rxtx_cap"], deleted) + @args('--name', dest='name', metavar='<name>', + help='Name of instance type/flavor') + @args('--memory', dest='memory', metavar='<memory size>', + help='Memory size') + @args('--cpu', dest='vcpus', metavar='<num cores>', help='Number cpus') + @args('--local_gb', dest='local_gb', metavar='<local_gb>', + help='local_gb') + @args('--flavor', dest='flavorid', metavar='<flavor id>', + help='Flavor ID') + @args('--swap', dest='swap', metavar='<swap>', help='Swap') + @args('--rxtx_quota', dest='rxtx_quota', metavar='<rxtx_quota>', + help='rxtx_quota') + @args('--rxtx_cap', dest='rxtx_cap', metavar='<rxtx_cap>', + help='rxtx_cap') def create(self, name, memory, vcpus, local_gb, flavorid, swap=0, rxtx_quota=0, rxtx_cap=0): - """Creates instance types / flavors - arguments: name memory vcpus local_gb flavorid [swap] [rxtx_quota] - [rxtx_cap] - """ + """Creates instance types / flavors""" try: instance_types.create(name, memory, vcpus, local_gb, flavorid, swap, rxtx_quota, rxtx_cap) - except exception.InvalidInput: + except exception.InvalidInput, e: print "Must supply valid parameters to create instance_type" print e sys.exit(1) @@ -897,9 +1067,10 @@ class 
InstanceTypeCommands(object): else: print "%s created" % name + @args('--name', dest='name', metavar='<name>', + help='Name of instance type/flavor') def delete(self, name, purge=None): - """Marks instance types / flavors as deleted - arguments: name""" + """Marks instance types / flavors as deleted""" try: if purge == "--purge": instance_types.purge(name) @@ -918,9 +1089,10 @@ class InstanceTypeCommands(object): else: print "%s %s" % (name, verb) + @args('--name', dest='name', metavar='<name>', + help='Name of instance type/flavor') def list(self, name=None): - """Lists all active or specific instance types / flavors - arguments: [name]""" + """Lists all active or specific instance types / flavors""" try: if name is None: inst_types = instance_types.get_all_types() @@ -968,11 +1140,18 @@ class ImageCommands(object): except Exception as exc: print _("Failed to register %(path)s: %(exc)s") % locals() + @args('--image', dest='image', metavar='<image>', help='Image') + @args('--kernel', dest='kernel', metavar='<kernel>', help='Kernel') + @args('--ram', dest='ramdisk', metavar='<ramdisk>', help='RAM disk') + @args('--owner', dest='owner', metavar='<owner>', help='Image owner') + @args('--name', dest='name', metavar='<name>', help='Image name') + @args('--public', dest='is_public', metavar="<'T'|'F'>", + help='Image public or not') + @args('--arch', dest='architecture', metavar='<arch>', + help='Architecture') def all_register(self, image, kernel, ramdisk, owner, name=None, is_public='T', architecture='x86_64'): - """Uploads an image, kernel, and ramdisk into the image_service - arguments: image kernel ramdisk owner [name] [is_public='T'] - [architecture='x86_64']""" + """Uploads an image, kernel, and ramdisk into the image_service""" kernel_id = self.kernel_register(kernel, owner, None, is_public, architecture) ramdisk_id = self.ramdisk_register(ramdisk, owner, None, @@ -981,31 +1160,51 @@ class ImageCommands(object): architecture, 'ami', 'ami', kernel_id, ramdisk_id) 
+ @args('--path', dest='path', metavar='<path>', help='Image path') + @args('--owner', dest='owner', metavar='<owner>', help='Image owner') + @args('--name', dest='name', metavar='<name>', help='Image name') + @args('--public', dest='is_public', metavar="<'T'|'F'>", + help='Image public or not') + @args('--arch', dest='architecture', metavar='<arch>', + help='Architecture') + @args('--cont_format', dest='container_format', + metavar='<container format>', + help='Container format(default bare)') + @args('--disk_format', dest='disk_format', metavar='<disk format>', + help='Disk format(default: raw)') + @args('--kernel', dest='kernel_id', metavar='<kernel>', help='Kernel') + @args('--ram', dest='ramdisk_id', metavar='<ramdisk>', help='RAM disk') def image_register(self, path, owner, name=None, is_public='T', architecture='x86_64', container_format='bare', disk_format='raw', kernel_id=None, ramdisk_id=None): - """Uploads an image into the image_service - arguments: path owner [name] [is_public='T'] [architecture='x86_64'] - [container_format='bare'] [disk_format='raw'] - [kernel_id=None] [ramdisk_id=None] - """ + """Uploads an image into the image_service""" return self._register(container_format, disk_format, path, owner, name, is_public, architecture, kernel_id, ramdisk_id) + @args('--path', dest='path', metavar='<path>', help='Image path') + @args('--owner', dest='owner', metavar='<owner>', help='Image owner') + @args('--name', dest='name', metavar='<name>', help='Image name') + @args('--public', dest='is_public', metavar="<'T'|'F'>", + help='Image public or not') + @args('--arch', dest='architecture', metavar='<arch>', + help='Architecture') def kernel_register(self, path, owner, name=None, is_public='T', architecture='x86_64'): - """Uploads a kernel into the image_service - arguments: path owner [name] [is_public='T'] [architecture='x86_64'] - """ + """Uploads a kernel into the image_service""" return self._register('aki', 'aki', path, owner, name, is_public, 
architecture) + @args('--path', dest='path', metavar='<path>', help='Image path') + @args('--owner', dest='owner', metavar='<owner>', help='Image owner') + @args('--name', dest='name', metavar='<name>', help='Image name') + @args('--public', dest='is_public', metavar="<'T'|'F'>", + help='Image public or not') + @args('--arch', dest='architecture', metavar='<arch>', + help='Architecture') def ramdisk_register(self, path, owner, name=None, is_public='T', architecture='x86_64'): - """Uploads a ramdisk into the image_service - arguments: path owner [name] [is_public='T'] [architecture='x86_64'] - """ + """Uploads a ramdisk into the image_service""" return self._register('ari', 'ari', path, owner, name, is_public, architecture) @@ -1054,9 +1253,10 @@ class ImageCommands(object): except Exception as exc: print _("Failed to convert %(old)s: %(exc)s") % locals() + @args('--dir', dest='directory', metavar='<path>', + help='Images directory') def convert(self, directory): - """Uploads old objectstore images in directory to new service - arguments: directory""" + """Uploads old objectstore images in directory to new service""" machine_images = {} other_images = {} directory = os.path.abspath(directory) @@ -1081,8 +1281,7 @@ class AgentBuildCommands(object): def create(self, os, architecture, version, url, md5hash, hypervisor='xen'): - """Creates a new agent build. - arguments: os architecture version url md5hash [hypervisor='xen']""" + """Creates a new agent build.""" ctxt = context.get_admin_context() agent_build = db.agent_build_create(ctxt, {'hypervisor': hypervisor, @@ -1093,8 +1292,7 @@ class AgentBuildCommands(object): 'md5hash': md5hash}) def delete(self, os, architecture, hypervisor='xen'): - """Deletes an existing agent build. 
- arguments: os architecture [hypervisor='xen']""" + """Deletes an existing agent build.""" ctxt = context.get_admin_context() agent_build_ref = db.agent_build_get_by_triple(ctxt, hypervisor, os, architecture) @@ -1128,9 +1326,7 @@ class AgentBuildCommands(object): def modify(self, os, architecture, version, url, md5hash, hypervisor='xen'): - """Update an existing agent build. - arguments: os architecture version url md5hash [hypervisor='xen'] - """ + """Update an existing agent build.""" ctxt = context.get_admin_context() agent_build_ref = db.agent_build_get_by_triple(ctxt, hypervisor, os, architecture) @@ -1158,6 +1354,7 @@ CATEGORIES = [ ('fixed', FixedIpCommands), ('flavor', InstanceTypeCommands), ('floating', FloatingIpCommands), + ('host', HostCommands), ('instance_type', InstanceTypeCommands), ('image', ImageCommands), ('network', NetworkCommands), @@ -1225,21 +1422,42 @@ def main(): command_object = fn() actions = methods_of(command_object) if len(argv) < 1: - print script_name + " category action [<args>]" - print _("Available actions for %s category:") % category - for k, _v in actions: - print "\t%s" % k - sys.exit(2) - action = argv.pop(0) - matches = lazy_match(action, actions) - action, fn = matches[0] + if hasattr(command_object, '__call__'): + action = '' + fn = command_object.__call__ + else: + print script_name + " category action [<args>]" + print _("Available actions for %s category:") % category + for k, _v in actions: + print "\t%s" % k + sys.exit(2) + else: + action = argv.pop(0) + matches = lazy_match(action, actions) + action, fn = matches[0] + + # For not decorated methods + options = getattr(fn, 'options', []) + + usage = "%%prog %s %s <args> [options]" % (category, action) + parser = OptionParser(usage=usage) + for ar, kw in options: + parser.add_option(*ar, **kw) + (opts, fn_args) = parser.parse_args(argv) + fn_kwargs = vars(opts) + + for k, v in fn_kwargs.items(): + if v is None: + del fn_kwargs[k] + # call the action with the 
remaining arguments try: - fn(*argv) + fn(*fn_args, **fn_kwargs) sys.exit(0) except TypeError: print _("Possible wrong number of arguments supplied") - print "%s %s: %s" % (category, action, fn.__doc__) + print fn.__doc__ + parser.print_help() raise except Exception: print _("Command failed, please check log for more info") diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy index 72271df3a..bdbb30a7f 100755 --- a/bin/nova-vncproxy +++ b/bin/nova-vncproxy @@ -63,6 +63,19 @@ flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpXMLFlag()) +def handle_flash_socket_policy(socket): + LOG.info(_("Received connection on flash socket policy port")) + + fd = socket.makefile('rw') + expected_command = "<policy-file-request/>" + if expected_command in fd.read(len(expected_command) + 1): + LOG.info(_("Received valid flash socket policy request")) + fd.write('<?xml version="1.0"?><cross-domain-policy><allow-' + 'access-from domain="*" to-ports="%d" /></cross-' + 'domain-policy>' % (FLAGS.vncproxy_port)) + fd.flush() + socket.close() + if __name__ == "__main__": utils.default_flagfile() FLAGS(sys.argv) @@ -101,4 +114,6 @@ if __name__ == "__main__": host=FLAGS.vncproxy_host, port=FLAGS.vncproxy_port) server.start() + server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host) + server.wait() diff --git a/contrib/nova.sh b/contrib/nova.sh index d7d34dcbd..eab680580 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -17,7 +17,7 @@ if [ ! -n "$HOST_IP" ]; then HOST_IP=`LC_ALL=C ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` fi -USE_MYSQL=${USE_MYSQL:-0} +USE_MYSQL=${USE_MYSQL:-1} INTERFACE=${INTERFACE:-eth0} FLOATING_RANGE=${FLOATING_RANGE:-10.6.0.0/27} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} @@ -159,10 +159,6 @@ NOVA_CONF_EOF mkdir -p $NOVA_DIR/instances rm -rf $NOVA_DIR/networks mkdir -p $NOVA_DIR/networks - if [ ! 
-d "$NOVA_DIR/images" ]; then - ln -s $DIR/images $NOVA_DIR/images - fi - if [ "$TEST" == 1 ]; then cd $NOVA_DIR python $NOVA_DIR/run_tests.py @@ -181,8 +177,18 @@ NOVA_CONF_EOF # create some floating ips $NOVA_DIR/bin/nova-manage floating create `hostname` $FLOATING_RANGE - # convert old images - $NOVA_DIR/bin/nova-manage image convert $DIR/images + if [ ! -d "$NOVA_DIR/images" ]; then + if [ ! -d "$DIR/converted-images" ]; then + # convert old images + mkdir $DIR/converted-images + ln -s $DIR/converted-images $NOVA_DIR/images + $NOVA_DIR/bin/nova-manage image convert $DIR/images + else + ln -s $DIR/converted-images $NOVA_DIR/images + fi + + fi + # nova api crashes if we start it with a regular screen command, # so send the start command by forcing text into the window. diff --git a/doc/build/html/.buildinfo b/doc/build/html/.buildinfo deleted file mode 100644 index 091736d4f..000000000 --- a/doc/build/html/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 2a2fe6198f4be4a4d6f289b09d16d74a -tags: fbb0d17656682115ca4d033fb2f83ba1 diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 0a5a7a4d6..859d4e331 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -30,13 +30,16 @@ Programming HowTos and Tutorials addmethod.openstackapi -Programming Concepts --------------------- +Background Concepts for Nova +---------------------------- .. toctree:: :maxdepth: 3 + distributed_scheduler + multinic zone rabbit + API Reference ------------- diff --git a/doc/source/devref/multinic.rst b/doc/source/devref/multinic.rst new file mode 100644 index 000000000..43830258f --- /dev/null +++ b/doc/source/devref/multinic.rst @@ -0,0 +1,39 @@ +MultiNic +======== + +What is it +---------- + +Multinic allows an instance to have more than one vif connected to it. 
Each vif is representative of a separate network with its own IP block. + +Managers +-------- + +Each of the network managers are designed to run independently of the compute manager. They expose a common API for the compute manager to call to determine and configure the network(s) for an instance. Direct calls to either the network api or especially the DB should be avoided by the virt layers. + +On startup a manager looks in the networks table for networks it is assigned and configures itself to support that network. Using the periodic task, they will claim new networks that have no host set. Only one network per network-host will be claimed at a time. This allows for psuedo-loadbalancing if there are multiple network-hosts running. + +Flat Manager +------------ + + .. image:: /images/multinic_flat.png + +The Flat manager is most similar to a traditional switched network environment. It assumes that the IP routing, DNS, DHCP (possibly) and bridge creation is handled by something else. That is it makes no attempt to configure any of this. It does keep track of a range of IPs for the instances that are connected to the network to be allocated. + +Each instance will get a fixed IP from each network's pool. The guest operating system may be configured to gather this information through an agent or by the hypervisor injecting the files, or it may ignore it completely and come up with only a layer 2 connection. + +Flat manager requires at least one nova-network process running that will listen to the API queue and respond to queries. It does not need to sit on any of the networks but it does keep track of the IPs it hands out to instances. + +FlatDHCP Manager +---------------- + + .. image:: /images/multinic_dhcp.png + +FlatDHCP manager builds on the the Flat manager adding dnsmask (DNS and DHCP) and radvd (Router Advertisement) servers on the bridge for that network. The services run on the host that is assigned to that network. 
The FlatDHCP manager will create its bridge as specified when the network was created on the network-host when the network host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them. + +VLAN Manager +------------ + + .. image:: /images/multinic_vlan.png + +The VLAN manager sets up forwarding to/from a cloudpipe instance in addition to providing dnsmask (DNS and DHCP) and radvd (Router Advertisement) services for each network. The manager will create its bridge as specified when the network was created on the network-host when the network host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them. diff --git a/doc/source/image_src/multinic_1.odg b/doc/source/image_src/multinic_1.odg Binary files differnew file mode 100644 index 000000000..bbd76b10e --- /dev/null +++ b/doc/source/image_src/multinic_1.odg diff --git a/doc/source/image_src/multinic_2.odg b/doc/source/image_src/multinic_2.odg Binary files differnew file mode 100644 index 000000000..1f1e4251a --- /dev/null +++ b/doc/source/image_src/multinic_2.odg diff --git a/doc/source/image_src/multinic_3.odg b/doc/source/image_src/multinic_3.odg Binary files differnew file mode 100644 index 000000000..d29e16353 --- /dev/null +++ b/doc/source/image_src/multinic_3.odg diff --git a/doc/source/images/multinic_dhcp.png b/doc/source/images/multinic_dhcp.png Binary files differnew file mode 100644 index 000000000..bce05b595 --- /dev/null +++ b/doc/source/images/multinic_dhcp.png diff --git a/doc/source/images/multinic_flat.png b/doc/source/images/multinic_flat.png Binary files differnew file mode 100644 index 000000000..e055e60e8 --- /dev/null +++ b/doc/source/images/multinic_flat.png diff --git a/doc/source/images/multinic_vlan.png b/doc/source/images/multinic_vlan.png Binary files differnew file mode 100644 index 
000000000..9b0e4fd63 --- /dev/null +++ b/doc/source/images/multinic_vlan.png diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 890d57fe7..cf1734281 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -262,6 +262,8 @@ class Authorizer(wsgi.Middleware): 'TerminateInstances': ['projectmanager', 'sysadmin'], 'RebootInstances': ['projectmanager', 'sysadmin'], 'UpdateInstance': ['projectmanager', 'sysadmin'], + 'StartInstances': ['projectmanager', 'sysadmin'], + 'StopInstances': ['projectmanager', 'sysadmin'], 'DeleteVolume': ['projectmanager', 'sysadmin'], 'DescribeImages': ['all'], 'DeregisterImage': ['projectmanager', 'sysadmin'], @@ -269,6 +271,7 @@ class Authorizer(wsgi.Middleware): 'DescribeImageAttribute': ['all'], 'ModifyImageAttribute': ['projectmanager', 'sysadmin'], 'UpdateImage': ['projectmanager', 'sysadmin'], + 'CreateImage': ['projectmanager', 'sysadmin'], }, 'AdminController': { # All actions have the same permission: ['none'] (the default) @@ -325,13 +328,13 @@ class Executor(wsgi.Application): except exception.VolumeNotFound as ex: LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') + ec2_id = ec2utils.id_to_ec2_vol_id(ex.volume_id) message = _('Volume %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) except exception.SnapshotNotFound as ex: LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') + ec2_id = ec2utils.id_to_ec2_snap_id(ex.snapshot_id) message = _('Snapshot %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9aaf37a2d..10720a804 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -27,6 +27,7 @@ import netaddr import os import urllib import tempfile +import time import 
shutil from nova import compute @@ -75,6 +76,95 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} +# TODO(yamahata): hypervisor dependent default device name +_DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' + + +def _parse_block_device_mapping(bdm): + """Parse BlockDeviceMappingItemType into flat hash + BlockDevicedMapping.<N>.DeviceName + BlockDevicedMapping.<N>.Ebs.SnapshotId + BlockDevicedMapping.<N>.Ebs.VolumeSize + BlockDevicedMapping.<N>.Ebs.DeleteOnTermination + BlockDevicedMapping.<N>.Ebs.NoDevice + BlockDevicedMapping.<N>.VirtualName + => remove .Ebs and allow volume id in SnapshotId + """ + ebs = bdm.pop('ebs', None) + if ebs: + ec2_id = ebs.pop('snapshot_id', None) + if ec2_id: + id = ec2utils.ec2_id_to_id(ec2_id) + if ec2_id.startswith('snap-'): + bdm['snapshot_id'] = id + elif ec2_id.startswith('vol-'): + bdm['volume_id'] = id + ebs.setdefault('delete_on_termination', True) + bdm.update(ebs) + return bdm + + +def _properties_get_mappings(properties): + return ec2utils.mappings_prepend_dev(properties.get('mappings', [])) + + +def _format_block_device_mapping(bdm): + """Contruct BlockDeviceMappingItemType + {'device_name': '...', 'snapshot_id': , ...} + => BlockDeviceMappingItemType + """ + keys = (('deviceName', 'device_name'), + ('virtualName', 'virtual_name')) + item = {} + for name, k in keys: + if k in bdm: + item[name] = bdm[k] + if bdm.get('no_device'): + item['noDevice'] = True + if ('snapshot_id' in bdm) or ('volume_id' in bdm): + ebs_keys = (('snapshotId', 'snapshot_id'), + ('snapshotId', 'volume_id'), # snapshotId is abused + ('volumeSize', 'volume_size'), + ('deleteOnTermination', 'delete_on_termination')) + ebs = {} + for name, k in ebs_keys: + if k in bdm: + if k == 'snapshot_id': + ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) + elif k == 'volume_id': + ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k]) + else: + ebs[name] = bdm[k] + assert 'snapshotId' in ebs + item['ebs'] = ebs + return item 
+ + +def _format_mappings(properties, result): + """Format multiple BlockDeviceMappingItemType""" + mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} + for m in _properties_get_mappings(properties) + if (m['virtual'] == 'swap' or + m['virtual'].startswith('ephemeral'))] + + block_device_mapping = [_format_block_device_mapping(bdm) for bdm in + properties.get('block_device_mapping', [])] + + # NOTE(yamahata): overwrite mappings with block_device_mapping + for bdm in block_device_mapping: + for i in range(len(mappings)): + if bdm['deviceName'] == mappings[i]['deviceName']: + del mappings[i] + break + mappings.append(bdm) + + # NOTE(yamahata): trim ebs.no_device == true. Is this necessary? + mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))] + + if mappings: + result['blockDeviceMapping'] = mappings + + class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls through the endpoint and messages @@ -86,8 +176,7 @@ class CloudController(object): self.volume_api = volume.API() self.compute_api = compute.API( network_api=self.network_api, - volume_api=self.volume_api, - hostname_factory=ec2utils.id_to_ec2_id) + volume_api=self.volume_api) self.setup() def __str__(self): @@ -121,8 +210,8 @@ class CloudController(object): result = {} for instance in self.compute_api.get_all(context, project_id=project_id): - if instance['fixed_ip']: - line = '%s slots=%d' % (instance['fixed_ip']['address'], + if instance['fixed_ips']: + line = '%s slots=%d' % (instance['fixed_ips'][0]['address'], instance['vcpus']) key = str(instance['key_name']) if key in result: @@ -152,7 +241,7 @@ class CloudController(object): # This ensures that all attributes of the instance # are populated. 
- instance_ref = db.instance_get(ctxt, instance_ref['id']) + instance_ref = db.instance_get(ctxt, instance_ref[0]['id']) mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) if instance_ref['key_name']: @@ -167,6 +256,9 @@ class CloudController(object): instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) + security_groups = db.security_group_get_by_instance(ctxt, + instance_ref['id']) + security_groups = [x['name'] for x in security_groups] data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -177,7 +269,7 @@ class CloudController(object): # TODO(vish): replace with real data 'ami': 'sda1', 'ephemeral0': 'sda2', - 'root': '/dev/sda1', + 'root': _DEFAULT_ROOT_DEVICE_NAME, 'swap': 'sda3'}, 'hostname': hostname, 'instance-action': 'none', @@ -190,7 +282,7 @@ class CloudController(object): 'public-ipv4': floating_ip or '', 'public-keys': keys, 'reservation-id': instance_ref['reservation_id'], - 'security-groups': '', + 'security-groups': security_groups, 'mpi': mpi}} for image_type in ['kernel', 'ramdisk']: @@ -305,9 +397,8 @@ class CloudController(object): def _format_snapshot(self, context, snapshot): s = {} - s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') - s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], - 'vol-%08x') + s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) + s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] @@ -391,15 +482,21 @@ class CloudController(object): pass return True - def describe_security_groups(self, context, group_name=None, **kwargs): + def describe_security_groups(self, context, group_name=None, group_id=None, + **kwargs): self.compute_api.ensure_default_security_group(context) - if group_name: + if group_name or group_id: groups = [] - for name in 
group_name: - group = db.security_group_get_by_name(context, - context.project_id, - name) - groups.append(group) + if group_name: + for name in group_name: + group = db.security_group_get_by_name(context, + context.project_id, + name) + groups.append(group) + if group_id: + for gid in group_id: + group = db.security_group_get(context, gid) + groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: @@ -433,7 +530,52 @@ class CloudController(object): g['ipPermissions'] += [r] return g - def _revoke_rule_args_to_dict(self, context, to_port=None, from_port=None, + def _rule_args_to_dict(self, context, kwargs): + rules = [] + if not 'groups' in kwargs and not 'ip_ranges' in kwargs: + rule = self._rule_dict_last_step(context, **kwargs) + if rule: + rules.append(rule) + return rules + if 'ip_ranges' in kwargs: + rules = self._cidr_args_split(kwargs) + finalset = [] + for rule in rules: + if 'groups' in rule: + groups_values = self._groups_args_split(rule) + for groups_value in groups_values: + finalset.append(groups_value) + else: + if rule: + finalset.append(rule) + return finalset + + def _cidr_args_split(self, kwargs): + cidr_args_split = [] + cidrs = kwargs['ip_ranges'] + for key, cidr in cidrs.iteritems(): + mykwargs = kwargs.copy() + del mykwargs['ip_ranges'] + mykwargs['cidr_ip'] = cidr['cidr_ip'] + cidr_args_split.append(mykwargs) + return cidr_args_split + + def _groups_args_split(self, kwargs): + groups_args_split = [] + groups = kwargs['groups'] + for key, group in groups.iteritems(): + mykwargs = kwargs.copy() + del mykwargs['groups'] + if 'group_name' in group: + mykwargs['source_security_group_name'] = group['group_name'] + if 'user_id' in group: + mykwargs['source_security_group_owner_id'] = group['user_id'] + if 'group_id' in group: + mykwargs['source_security_group_id'] = group['group_id'] + groups_args_split.append(mykwargs) + return groups_args_split + + def _rule_dict_last_step(self, context, to_port=None, 
from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): @@ -497,15 +639,28 @@ class CloudController(object): return True return False - def revoke_security_group_ingress(self, context, group_name, **kwargs): - LOG.audit(_("Revoke security group ingress %s"), group_name, - context=context) + def revoke_security_group_ingress(self, context, group_name=None, + group_id=None, **kwargs): + if not group_name and not group_id: + err = "Not enough parameters, need group_name or group_id" + raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) - security_group = db.security_group_get_by_name(context, - context.project_id, - group_name) - - criteria = self._revoke_rule_args_to_dict(context, **kwargs) + notfound = exception.SecurityGroupNotFound + if group_name: + security_group = db.security_group_get_by_name(context, + context.project_id, + group_name) + if not security_group: + raise notfound(security_group_id=group_name) + if group_id: + security_group = db.security_group_get(context, group_id) + if not security_group: + raise notfound(security_group_id=group_id) + + msg = "Revoke security group ingress %s" + LOG.audit(_(msg), security_group['name'], context=context) + + criteria = self._rule_args_to_dict(context, kwargs)[0] if criteria is None: raise exception.ApiError(_("Not enough parameters to build a " "valid rule.")) @@ -518,7 +673,7 @@ class CloudController(object): if match: db.security_group_rule_destroy(context, rule['id']) self.compute_api.trigger_security_group_rules_refresh(context, - security_group['id']) + security_group_id=security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) @@ -526,29 +681,54 @@ class CloudController(object): # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. 
- def authorize_security_group_ingress(self, context, group_name, **kwargs): - LOG.audit(_("Authorize security group ingress %s"), group_name, - context=context) + def authorize_security_group_ingress(self, context, group_name=None, + group_id=None, **kwargs): + if not group_name and not group_id: + err = "Not enough parameters, need group_name or group_id" + raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) - security_group = db.security_group_get_by_name(context, - context.project_id, - group_name) - - values = self._revoke_rule_args_to_dict(context, **kwargs) - if values is None: - raise exception.ApiError(_("Not enough parameters to build a " - "valid rule.")) - values['parent_group_id'] = security_group.id - - if self._security_group_rule_exists(security_group, values): - raise exception.ApiError(_('This rule already exists in group %s') - % group_name) - - security_group_rule = db.security_group_rule_create(context, values) + notfound = exception.SecurityGroupNotFound + if group_name: + security_group = db.security_group_get_by_name(context, + context.project_id, + group_name) + if not security_group: + raise notfound(security_group_id=group_name) + if group_id: + security_group = db.security_group_get(context, group_id) + if not security_group: + raise notfound(security_group_id=group_id) + + msg = "Authorize security group ingress %s" + LOG.audit(_(msg), security_group['name'], context=context) + prevalues = [] + try: + prevalues = kwargs['ip_permissions'] + except KeyError: + prevalues.append(kwargs) + postvalues = [] + for values in prevalues: + rulesvalues = self._rule_args_to_dict(context, values) + if not rulesvalues: + err = "%s Not enough parameters to build a valid rule" + raise exception.ApiError(_(err % rulesvalues)) + for values_for_rule in rulesvalues: + values_for_rule['parent_group_id'] = security_group.id + if self._security_group_rule_exists(security_group, + values_for_rule): + err = '%s - This rule 
already exists in group' + raise exception.ApiError(_(err) % values_for_rule) + postvalues.append(values_for_rule) + + for values_for_rule in postvalues: + security_group_rule = db.security_group_rule_create(context, + values_for_rule) self.compute_api.trigger_security_group_rules_refresh(context, - security_group['id']) + security_group_id=security_group['id']) + group = db.security_group_get_by_name(context, context.project_id, + security_group['name']) return True def _get_source_project_id(self, context, source_security_group_owner_id): @@ -583,11 +763,23 @@ class CloudController(object): return {'securityGroupSet': [self._format_security_group(context, group_ref)]} - def delete_security_group(self, context, group_name, **kwargs): + def delete_security_group(self, context, group_name=None, group_id=None, + **kwargs): + if not group_name and not group_id: + err = "Not enough parameters, need group_name or group_id" + raise exception.ApiError(_(err)) + notfound = exception.SecurityGroupNotFound + if group_name: + security_group = db.security_group_get_by_name(context, + context.project_id, + group_name) + if not security_group: + raise notfound(security_group_id=group_name) + elif group_id: + security_group = db.security_group_get(context, group_id) + if not security_group: + raise notfound(security_group_id=group_id) LOG.audit(_("Delete security group %s"), group_name, context=context) - security_group = db.security_group_get_by_name(context, - context.project_id, - group_name) db.security_group_destroy(context, security_group.id) return True @@ -641,7 +833,7 @@ class CloudController(object): instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} - v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x') + v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ -663,8 +855,7 @@ class CloudController(object): else: 
v['attachmentSet'] = [{}] if volume.get('snapshot_id') != None: - v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], - 'snap-%08x') + v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None @@ -727,7 +918,7 @@ class CloudController(object): 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} + 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) @@ -739,7 +930,7 @@ class CloudController(object): 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} + 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _convert_to_set(self, lst, label): if lst is None or lst == []: @@ -763,6 +954,37 @@ class CloudController(object): assert len(i) == 1 return i[0] + def _format_instance_bdm(self, context, instance_id, root_device_name, + result): + """Format InstanceBlockDeviceMappingResponseItemType""" + root_device_type = 'instance-store' + mapping = [] + for bdm in db.block_device_mapping_get_all_by_instance(context, + instance_id): + volume_id = bdm['volume_id'] + if (volume_id is None or bdm['no_device']): + continue + + if (bdm['device_name'] == root_device_name and + (bdm['snapshot_id'] or bdm['volume_id'])): + assert not bdm['virtual_name'] + root_device_type = 'ebs' + + vol = self.volume_api.get(context, volume_id=volume_id) + LOG.debug(_("vol = %s\n"), vol) + # TODO(yamahata): volume attach time + ebs = {'volumeId': volume_id, + 'deleteOnTermination': bdm['delete_on_termination'], + 'attachTime': vol['attach_time'] or '-', + 'status': vol['status'], } + res = {'deviceName': bdm['device_name'], + 'ebs': ebs, } + mapping.append(res) + + if mapping: + result['blockDeviceMapping'] 
= mapping + result['rootDeviceType'] = root_device_type + def _format_instances(self, context, instance_id=None, **kwargs): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls @@ -793,15 +1015,15 @@ class CloudController(object): 'name': instance['state_description']} fixed_addr = None floating_addr = None - if instance['fixed_ip']: - fixed_addr = instance['fixed_ip']['address'] - if instance['fixed_ip']['floating_ips']: - fixed = instance['fixed_ip'] + if instance['fixed_ips']: + fixed = instance['fixed_ips'][0] + fixed_addr = fixed['address'] + if fixed['floating_ips']: floating_addr = fixed['floating_ips'][0]['address'] - if instance['fixed_ip']['network'] and 'use_v6' in kwargs: + if fixed['network'] and 'use_v6' in kwargs: i['dnsNameV6'] = ipv6.to_global( - instance['fixed_ip']['network']['cidr_v6'], - instance['mac_address'], + fixed['network']['cidr_v6'], + fixed['virtual_interface']['address'], instance['project_id']) i['privateDnsName'] = fixed_addr @@ -824,6 +1046,10 @@ class CloudController(object): i['amiLaunchIndex'] = instance['launch_index'] i['displayName'] = instance['display_name'] i['displayDescription'] = instance['display_description'] + i['rootDeviceName'] = (instance.get('root_device_name') or + _DEFAULT_ROOT_DEVICE_NAME) + self._format_instance_bdm(context, instance_id, + i['rootDeviceName'], i) host = instance['host'] zone = self._get_availability_zone_by_host(context, host) i['placement'] = {'availabilityZone': zone} @@ -877,7 +1103,8 @@ class CloudController(object): public_ip = self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} except rpc.RemoteError as ex: - if ex.exc_type == 'NoMoreAddresses': + # NOTE(tr3buchet) - why does this block exist? 
+ if ex.exc_type == 'NoMoreFloatingIps': raise exception.NoMoreFloatingIps() else: raise @@ -910,23 +1137,7 @@ class CloudController(object): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] for bdm in kwargs.get('block_device_mapping', []): - # NOTE(yamahata) - # BlockDevicedMapping.<N>.DeviceName - # BlockDevicedMapping.<N>.Ebs.SnapshotId - # BlockDevicedMapping.<N>.Ebs.VolumeSize - # BlockDevicedMapping.<N>.Ebs.DeleteOnTermination - # BlockDevicedMapping.<N>.VirtualName - # => remove .Ebs and allow volume id in SnapshotId - ebs = bdm.pop('ebs', None) - if ebs: - ec2_id = ebs.pop('snapshot_id') - id = ec2utils.ec2_id_to_id(ec2_id) - if ec2_id.startswith('snap-'): - bdm['snapshot_id'] = id - elif ec2_id.startswith('vol-'): - bdm['volume_id'] = id - ebs.setdefault('delete_on_termination', True) - bdm.update(ebs) + _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) @@ -994,7 +1205,7 @@ class CloudController(object): def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - self._do_instance(self.compute_api.rescue, contect, instnace_id) + self._do_instance(self.compute_api.rescue, context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): @@ -1045,12 +1256,16 @@ class CloudController(object): def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) - return self.image_service.show(context, internal_id) + image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) + image_type = ec2_id.split('-')[0] + if self._image_type(image.get('container_format')) != image_type: + raise exception.ImageNotFound(image_id=ec2_id) + return image def _format_image(self, image): """Convert from format 
defined by BaseImageService to S3 format.""" @@ -1081,6 +1296,20 @@ class CloudController(object): i['imageType'] = display_mapping.get(image_type) i['isPublic'] = image.get('is_public') == True i['architecture'] = image['properties'].get('architecture') + + properties = image['properties'] + root_device_name = ec2utils.properties_root_device_name(properties) + root_device_type = 'instance-store' + for bdm in properties.get('block_device_mapping', []): + if (bdm.get('device_name') == root_device_name and + ('snapshot_id' in bdm or 'volume_id' in bdm) and + not bdm.get('no_device')): + root_device_type = 'ebs' + i['rootDeviceName'] = (root_device_name or _DEFAULT_ROOT_DEVICE_NAME) + i['rootDeviceType'] = root_device_type + + _format_mappings(properties, i) + return i def describe_images(self, context, image_id=None, **kwargs): @@ -1105,30 +1334,64 @@ class CloudController(object): self.image_service.delete(context, internal_id) return {'imageId': image_id} + def _register_image(self, context, metadata): + image = self.image_service.create(context, metadata) + image_type = self._image_type(image.get('container_format')) + image_id = self.image_ec2_id(image['id'], image_type) + return image_id + def register_image(self, context, image_location=None, **kwargs): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] metadata = {'properties': {'image_location': image_location}} - image = self.image_service.create(context, metadata) - image_type = self._image_type(image.get('container_format')) - image_id = self.image_ec2_id(image['id'], - image_type) + + if 'root_device_name' in kwargs: + metadata['properties']['root_device_name'] = \ + kwargs.get('root_device_name') + + mappings = [_parse_block_device_mapping(bdm) for bdm in + kwargs.get('block_device_mapping', [])] + if mappings: + metadata['properties']['block_device_mapping'] = mappings + + image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " 
id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): - if attribute != 'launchPermission': + def _block_device_mapping_attribute(image, result): + _format_mappings(image['properties'], result) + + def _launch_permission_attribute(image, result): + result['launchPermission'] = [] + if image['is_public']: + result['launchPermission'].append({'group': 'all'}) + + def _root_device_name_attribute(image, result): + result['rootDeviceName'] = \ + ec2utils.properties_root_device_name(image['properties']) + if result['rootDeviceName'] is None: + result['rootDeviceName'] = _DEFAULT_ROOT_DEVICE_NAME + + supported_attributes = { + 'blockDeviceMapping': _block_device_mapping_attribute, + 'launchPermission': _launch_permission_attribute, + 'rootDeviceName': _root_device_name_attribute, + } + + fn = supported_attributes.get(attribute) + if fn is None: raise exception.ApiError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) - result = {'imageId': image_id, 'launchPermission': []} - if image['is_public']: - result['launchPermission'].append({'group': 'all'}) + + result = {'imageId': image_id} + fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, @@ -1159,3 +1422,109 @@ class CloudController(object): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result + + # TODO(yamahata): race condition + # At the moment there is no way to prevent others from + # manipulating instances/volumes/snapshots. + # As other code doesn't take it into consideration, here we don't + # care of it for now. 
Ostrich algorithm + def create_image(self, context, instance_id, **kwargs): + # NOTE(yamahata): name/description are ignored by register_image(), + # do so here + no_reboot = kwargs.get('no_reboot', False) + + ec2_instance_id = instance_id + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + instance = self.compute_api.get(context, instance_id) + + # stop the instance if necessary + restart_instance = False + if not no_reboot: + state_description = instance['state_description'] + + # if the instance is in subtle state, refuse to proceed. + if state_description not in ('running', 'stopping', 'stopped'): + raise exception.InstanceNotRunning(instance_id=ec2_instance_id) + + if state_description == 'running': + restart_instance = True + self.compute_api.stop(context, instance_id=instance_id) + + # wait instance for really stopped + start_time = time.time() + while state_description != 'stopped': + time.sleep(1) + instance = self.compute_api.get(context, instance_id) + state_description = instance['state_description'] + # NOTE(yamahata): timeout and error. 1 hour for now for safety. + # Is it too short/long? + # Or is there any better way? 
+ timeout = 1 * 60 * 60 * 60 + if time.time() > start_time + timeout: + raise exception.ApiError( + _('Couldn\'t stop instance with in %d sec') % timeout) + + src_image = self._get_image(context, instance['image_ref']) + properties = src_image['properties'] + if instance['root_device_name']: + properties['root_device_name'] = instance['root_device_name'] + + mapping = [] + bdms = db.block_device_mapping_get_all_by_instance(context, + instance_id) + for bdm in bdms: + if bdm.no_device: + continue + m = {} + for attr in ('device_name', 'snapshot_id', 'volume_id', + 'volume_size', 'delete_on_termination', 'no_device', + 'virtual_name'): + val = getattr(bdm, attr) + if val is not None: + m[attr] = val + + volume_id = m.get('volume_id') + if m.get('snapshot_id') and volume_id: + # create snapshot based on volume_id + vol = self.volume_api.get(context, volume_id=volume_id) + # NOTE(yamahata): Should we wait for snapshot creation? + # Linux LVM snapshot creation completes in + # short time, it doesn't matter for now. + snapshot = self.volume_api.create_snapshot_force( + context, volume_id=volume_id, name=vol['display_name'], + description=vol['display_description']) + m['snapshot_id'] = snapshot['id'] + del m['volume_id'] + + if m: + mapping.append(m) + + for m in _properties_get_mappings(properties): + virtual_name = m['virtual'] + if virtual_name in ('ami', 'root'): + continue + + assert (virtual_name == 'swap' or + virtual_name.startswith('ephemeral')) + device_name = m['device'] + if device_name in [b['device_name'] for b in mapping + if not b.get('no_device', False)]: + continue + + # NOTE(yamahata): swap and ephemeral devices are specified in + # AMI, but disabled for this instance by user. + # So disable those device by no_device. 
+ mapping.append({'device_name': device_name, 'no_device': True}) + + if mapping: + properties['block_device_mapping'] = mapping + + for attr in ('status', 'location', 'id'): + src_image.pop(attr, None) + + image_id = self._register_image(context, src_image) + + if restart_instance: + self.compute_api.start(context, instance_id=instance_id) + + return {'imageId': image_id} diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index 222e1de1e..bae1e0ee5 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -34,6 +34,17 @@ def id_to_ec2_id(instance_id, template='i-%08x'): return template % instance_id +def id_to_ec2_snap_id(instance_id): + """Convert an snapshot ID (int) to an ec2 snapshot ID + (snap-[base 16 number])""" + return id_to_ec2_id(instance_id, 'snap-%08x') + + +def id_to_ec2_vol_id(instance_id): + """Convert an volume ID (int) to an ec2 volume ID (vol-[base 16 number])""" + return id_to_ec2_id(instance_id, 'vol-%08x') + + _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') @@ -124,3 +135,32 @@ def dict_from_dotted_str(items): args[key] = value return args + + +def properties_root_device_name(properties): + """get root device name from image meta data. + If it isn't specified, return None. 
+ """ + root_device_name = None + + # NOTE(yamahata): see image_service.s3.s3create() + for bdm in properties.get('mappings', []): + if bdm['virtual'] == 'root': + root_device_name = bdm['device'] + + # NOTE(yamahata): register_image's command line can override + # <machine>.manifest.xml + if 'root_device_name' in properties: + root_device_name = properties['root_device_name'] + + return root_device_name + + +def mappings_prepend_dev(mappings): + """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type""" + for m in mappings: + virtual = m['virtual'] + if ((virtual == 'swap' or virtual.startswith('ephemeral')) and + (not m['device'].startswith('/'))): + m['device'] = '/dev/' + m['device'] + return mappings diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index b70266a20..1dc275c90 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -35,6 +35,9 @@ FLAGS = flags.FLAGS class MetadataRequestHandler(wsgi.Application): """Serve metadata from the EC2 API.""" + def __init__(self): + self.cc = cloud.CloudController() + def print_data(self, data): if isinstance(data, dict): output = '' @@ -68,12 +71,11 @@ class MetadataRequestHandler(wsgi.Application): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - cc = cloud.CloudController() remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) try: - meta_data = cc.get_metadata(remote_address) + meta_data = self.cc.get_metadata(remote_address) except Exception: LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index f24017df0..868b98a31 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -125,6 +125,10 @@ class APIRouter(base_wsgi.Router): collection={'detail': 'GET'}, member=self.server_members) + 
mapper.resource("ip", "ips", controller=ips.create_resource(version), + parent_resource=dict(member_name='server', + collection_name='servers')) + mapper.resource("image", "images", controller=images.create_resource(version), collection={'detail': 'GET'}) @@ -144,9 +148,6 @@ class APIRouterV10(APIRouter): def _setup_routes(self, mapper): super(APIRouterV10, self)._setup_routes(mapper, '1.0') - mapper.resource("image", "images", - controller=images.create_resource('1.0'), - collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, @@ -157,22 +158,23 @@ class APIRouterV10(APIRouter): parent_resource=dict(member_name='server', collection_name='servers')) - mapper.resource("ip", "ips", controller=ips.create_resource(), - collection=dict(public='GET', private='GET'), - parent_resource=dict(member_name='server', - collection_name='servers')) - class APIRouterV11(APIRouter): """Define routes specific to OpenStack API V1.1.""" def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper, '1.1') - mapper.resource("image_meta", "meta", - controller=image_metadata.create_resource(), + image_metadata_controller = image_metadata.create_resource() + mapper.resource("image_meta", "metadata", + controller=image_metadata_controller, parent_resource=dict(member_name='image', collection_name='images')) + mapper.connect("metadata", "/images/{image_id}/metadata", + controller=image_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) + mapper.resource("server_meta", "meta", controller=server_metadata.create_resource(), parent_resource=dict(member_name='server', diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 0dcd37217..a13a758ab 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -47,10 +47,10 @@ class Controller(object): raise exception.AdminRequired() def index(self, req): - raise 
faults.Fault(webob.exc.HTTPNotImplemented()) + raise webob.exc.HTTPNotImplemented() def detail(self, req): - raise faults.Fault(webob.exc.HTTPNotImplemented()) + raise webob.exc.HTTPNotImplemented() def show(self, req, id): """Return data about the given account id""" @@ -65,7 +65,7 @@ class Controller(object): def create(self, req, body): """We use update with create-or-update semantics because the id comes from an external source""" - raise faults.Fault(webob.exc.HTTPNotImplemented()) + raise webob.exc.HTTPNotImplemented() def update(self, req, id, body): """This is really create or update.""" @@ -87,8 +87,8 @@ def create_resource(): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } - - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 71a14d4ce..7ff0d999e 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -19,7 +19,6 @@ import time from webob import exc -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -34,22 +33,22 @@ class Controller(object): def __init__(self): pass - def index(self, req, server_id): + def index(self, req, server_id, **kwargs): """ Returns the list of backup schedules for a given instance """ - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def show(self, req, server_id, id): + def show(self, req, server_id, id, **kwargs): """ Returns a single backup schedule for a given instance """ - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def create(self, req, server_id, body): + def create(self, req, server_id, **kwargs): """ No actual update method required, since the existing API allows both create and update through a POST 
""" - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def delete(self, req, server_id, id): + def delete(self, req, server_id, id, **kwargs): """ Deletes an existing backup schedule """ - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def create_resource(): @@ -59,9 +58,10 @@ def create_resource(): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, metadata=metadata), } - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index aa8911b62..bd14a1389 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -53,10 +53,10 @@ def get_pagination_params(request): params[param] = int(request.GET[param]) except ValueError: msg = _('%s param must be an integer') % param - raise webob.exc.HTTPBadRequest(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) if params[param] < 0: msg = _('%s param must be positive') % param - raise webob.exc.HTTPBadRequest(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) return params @@ -77,18 +77,22 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit): try: offset = int(request.GET.get('offset', 0)) except ValueError: - raise webob.exc.HTTPBadRequest(_('offset param must be an integer')) + msg = _('offset param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) try: limit = int(request.GET.get('limit', max_limit)) except ValueError: - raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: - raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) 
if offset < 0: - raise webob.exc.HTTPBadRequest(_('offset param must be positive')) + msg = _('offset param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) limit = min(max_limit, limit or max_limit) range_end = offset + limit @@ -111,7 +115,8 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): start_index = i + 1 break if start_index < 0: - raise webob.exc.HTTPBadRequest(_('marker [%s] not found' % marker)) + msg = _('marker [%s] not found') % marker + raise webob.exc.HTTPBadRequest(explanation=msg) range_end = start_index + limit return items[start_index:range_end] @@ -133,4 +138,57 @@ def get_id_from_href(href): return int(urlparse(href).path.split('/')[-1]) except: LOG.debug(_("Error extracting id from href: %s") % href) - raise webob.exc.HTTPBadRequest(_('could not parse id from href')) + raise ValueError(_('could not parse id from href')) + + +def remove_version_from_href(href): + """Removes the first api version from the href. + + Given: 'http://www.nova.com/v1.1/123' + Returns: 'http://www.nova.com/123' + + Given: 'http://www.nova.com/v1.1' + Returns: 'http://www.nova.com' + + """ + try: + #removes the first instance that matches /v#.#/ + new_href = re.sub(r'[/][v][0-9]+\.[0-9]+[/]', '/', href, count=1) + + #if no version was found, try finding /v#.# at the end of the string + if new_href == href: + new_href = re.sub(r'[/][v][0-9]+\.[0-9]+$', '', href, count=1) + except: + LOG.debug(_("Error removing version from href: %s") % href) + msg = _('could not parse version from href') + raise ValueError(msg) + + if new_href == href: + msg = _('href does not contain version') + raise ValueError(msg) + return new_href + + +def get_version_from_href(href): + """Returns the api version in the href. + + Returns the api version in the href. 
+ If no version is found, 1.0 is returned + + Given: 'http://www.nova.com/123' + Returns: '1.0' + + Given: 'http://www.nova.com/v1.1' + Returns: '1.1' + + """ + try: + #finds the first instance that matches /v#.#/ + version = re.findall(r'[/][v][0-9]+\.[0-9]+[/]', href) + #if no version was found, try finding /v#.# at the end of the string + if not version: + version = re.findall(r'[/][v][0-9]+\.[0-9]+$', href) + version = re.findall(r'[0-9]+\.[0-9]', version[0])[0] + except IndexError: + version = '1.0' + return version diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index bccf04d8f..d2655acfa 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -16,10 +16,10 @@ # under the License. from webob import exc +import webob from nova import console from nova import exception -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -71,12 +71,12 @@ class Controller(object): int(server_id), int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return _translate_detail_keys(console) def update(self, req, server_id, id): """You can't update a console""" - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def delete(self, req, server_id, id): """Deletes a console""" @@ -85,19 +85,9 @@ class Controller(object): int(server_id), int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() + return webob.Response(status_int=202) def create_resource(): - metadata = { - 'attributes': { - 'console': [], - }, - } - - serializers = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), - } - - return wsgi.Resource(Controller(), serializers=serializers) + return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 914ec5bfb..b4a211857 100644 --- 
a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -78,14 +78,15 @@ class FloatingIPController(object): return _translate_floating_ips_view(floating_ips) - def create(self, req, body): + def create(self, req): context = req.environ['nova.context'] try: address = self.network_api.allocate_floating_ip(context) ip = self.network_api.get_floating_ip_by_ip(context, address) except rpc.RemoteError as ex: - if ex.exc_type == 'NoMoreAddresses': + # NOTE(tr3buchet) - why does this block exist? + if ex.exc_type == 'NoMoreFloatingIps': raise exception.NoMoreFloatingIps() else: raise @@ -123,7 +124,7 @@ class FloatingIPController(object): "floating_ip": floating_ip, "fixed_ip": fixed_ip}} - def disassociate(self, req, id, body): + def disassociate(self, req, id): """ POST /floating_ips/{id}/disassociate """ context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) diff --git a/nova/api/openstack/contrib/hosts.py b/nova/api/openstack/contrib/hosts.py new file mode 100644 index 000000000..55e57e1a4 --- /dev/null +++ b/nova/api/openstack/contrib/hosts.py @@ -0,0 +1,114 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""The hosts admin extension.""" + +import webob.exc + +from nova import compute +from nova import exception +from nova import flags +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.scheduler import api as scheduler_api + + +LOG = logging.getLogger("nova.api.hosts") +FLAGS = flags.FLAGS + + +def _list_hosts(req, service=None): + """Returns a summary list of hosts, optionally filtering + by service type. + """ + context = req.environ['nova.context'] + hosts = scheduler_api.get_host_list(context) + if service: + hosts = [host for host in hosts + if host["service"] == service] + return hosts + + +def check_host(fn): + """Makes sure that the host exists.""" + def wrapped(self, req, id, service=None, *args, **kwargs): + listed_hosts = _list_hosts(req, service) + hosts = [h["host_name"] for h in listed_hosts] + if id in hosts: + return fn(self, req, id, *args, **kwargs) + else: + raise exception.HostNotFound(host=id) + return wrapped + + +class HostController(object): + """The Hosts API controller for the OpenStack API.""" + def __init__(self): + self.compute_api = compute.API() + super(HostController, self).__init__() + + def index(self, req): + return {'hosts': _list_hosts(req)} + + @check_host + def update(self, req, id, body): + for raw_key, raw_val in body.iteritems(): + key = raw_key.lower().strip() + val = raw_val.lower().strip() + # NOTE: (dabo) Right now only 'status' can be set, but other + # actions may follow. 
+ if key == "status": + if val[:6] in ("enable", "disabl"): + return self._set_enabled_status(req, id, + enabled=(val.startswith("enable"))) + else: + explanation = _("Invalid status: '%s'") % raw_val + raise webob.exc.HTTPBadRequest(explanation=explanation) + else: + explanation = _("Invalid update setting: '%s'") % raw_key + raise webob.exc.HTTPBadRequest(explanation=explanation) + + def _set_enabled_status(self, req, host, enabled): + """Sets the specified host's ability to accept new instances.""" + context = req.environ['nova.context'] + state = "enabled" if enabled else "disabled" + LOG.audit(_("Setting host %(host)s to %(state)s.") % locals()) + result = self.compute_api.set_host_enabled(context, host=host, + enabled=enabled) + return {"host": host, "status": result} + + +class Hosts(extensions.ExtensionDescriptor): + def get_name(self): + return "Hosts" + + def get_alias(self): + return "os-hosts" + + def get_description(self): + return "Host administration" + + def get_namespace(self): + return "http://docs.openstack.org/ext/hosts/api/v1.1" + + def get_updated(self): + return "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [extensions.ResourceExtension('os-hosts', HostController(), + collection_actions={'update': 'PUT'}, member_actions={})] + return resources diff --git a/nova/api/openstack/contrib/multinic.py b/nova/api/openstack/contrib/multinic.py new file mode 100644 index 000000000..da8dcee5d --- /dev/null +++ b/nova/api/openstack/contrib/multinic.py @@ -0,0 +1,126 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The multinic extension.""" + +from webob import exc +import webob + +from nova import compute +from nova import log as logging +from nova.api.openstack import extensions +from nova.api.openstack import faults + + +LOG = logging.getLogger("nova.api.multinic") + + +# Note: The class name is as it has to be for this to be loaded as an +# extension--only first character capitalized. +class Multinic(extensions.ExtensionDescriptor): + """The multinic extension. + + Exposes addFixedIp and removeFixedIp actions on servers. + + """ + + def __init__(self, *args, **kwargs): + """Initialize the extension. + + Gets a compute.API object so we can call the back-end + add_fixed_ip() and remove_fixed_ip() methods. 
+ """ + + super(Multinic, self).__init__(*args, **kwargs) + self.compute_api = compute.API() + + def get_name(self): + """Return the extension name, as required by contract.""" + + return "Multinic" + + def get_alias(self): + """Return the extension alias, as required by contract.""" + + return "NMN" + + def get_description(self): + """Return the extension description, as required by contract.""" + + return "Multiple network support" + + def get_namespace(self): + """Return the namespace, as required by contract.""" + + return "http://docs.openstack.org/ext/multinic/api/v1.1" + + def get_updated(self): + """Return the last updated timestamp, as required by contract.""" + + return "2011-06-09T00:00:00+00:00" + + def get_actions(self): + """Return the actions the extension adds, as required by contract.""" + + actions = [] + + # Add the add_fixed_ip action + act = extensions.ActionExtension("servers", "addFixedIp", + self._add_fixed_ip) + actions.append(act) + + # Add the remove_fixed_ip action + act = extensions.ActionExtension("servers", "removeFixedIp", + self._remove_fixed_ip) + actions.append(act) + + return actions + + def _add_fixed_ip(self, input_dict, req, id): + """Adds an IP on a given network to an instance.""" + + try: + # Validate the input entity + if 'networkId' not in input_dict['addFixedIp']: + LOG.exception(_("Missing 'networkId' argument for addFixedIp")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + + # Add the fixed IP + network_id = input_dict['addFixedIp']['networkId'] + self.compute_api.add_fixed_ip(req.environ['nova.context'], id, + network_id) + except Exception, e: + LOG.exception(_("Error in addFixedIp %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return webob.Response(status_int=202) + + def _remove_fixed_ip(self, input_dict, req, id): + """Removes an IP from an instance.""" + + try: + # Validate the input entity + if 'address' not in input_dict['removeFixedIp']: + LOG.exception(_("Missing 'address' argument for " + 
"removeFixedIp")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + + # Remove the fixed IP + address = input_dict['removeFixedIp']['address'] + self.compute_api.remove_fixed_ip(req.environ['nova.context'], id, + address) + except Exception, e: + LOG.exception(_("Error in removeFixedIp %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return webob.Response(status_int=202) diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index e5e2c5b50..827e36097 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -16,6 +16,7 @@ """The volumes extension.""" from webob import exc +import webob from nova import compute from nova import exception @@ -104,7 +105,7 @@ class VolumeController(object): self.volume_api.delete(context, volume_id=id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def index(self, req): """Returns a summary list of volumes.""" @@ -279,7 +280,7 @@ class VolumeAttachmentController(object): self.compute_api.detach_volume(context, volume_id=volume_id) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def _items(self, req, server_id, entity_maker): """Returns a list of attachments, transformed through entity_maker.""" diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 436e524c1..f8317565e 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -28,7 +28,6 @@ from nova import quota from nova import utils from nova.compute import instance_types -from nova.api.openstack import faults from nova.api.openstack import wsgi from nova.auth import manager as auth_manager @@ -70,11 +69,14 @@ class CreateInstanceHelper(object): return type from this method is left to the caller. 
""" if not body: - raise faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() - context = req.environ['nova.context'] + if not 'server' in body: + raise exc.HTTPUnprocessableEntity() - password = self.controller._get_server_admin_password(body['server']) + server_dict = body['server'] + context = req.environ['nova.context'] + password = self.controller._get_server_admin_password(server_dict) key_name = None key_data = None @@ -94,26 +96,39 @@ class CreateInstanceHelper(object): except Exception, e: msg = _("Cannot find requested image %(image_href)s: %(e)s" % locals()) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) - personality = body['server'].get('personality') + personality = server_dict.get('personality') injected_files = [] if personality: injected_files = self._get_injected_files(personality) - flavor_id = self.controller._flavor_id_from_req_data(body) + try: + flavor_id = self.controller._flavor_id_from_req_data(body) + except ValueError as error: + msg = _("Invalid flavorRef provided.") + raise exc.HTTPBadRequest(explanation=msg) - if not 'name' in body['server']: + if not 'name' in server_dict: msg = _("Server name is not defined") raise exc.HTTPBadRequest(explanation=msg) - zone_blob = body['server'].get('blob') - name = body['server']['name'] + zone_blob = server_dict.get('blob') + name = server_dict['name'] self._validate_server_name(name) name = name.strip() - reservation_id = body['server'].get('reservation_id') + reservation_id = server_dict.get('reservation_id') + min_count = server_dict.get('min_count') + max_count = server_dict.get('max_count') + # min_count and max_count are optional. If they exist, they come + # in as strings. We want to default 'min_count' to 1, and default + # 'max_count' to be 'min_count'. 
+ min_count = int(min_count) if min_count else 1 + max_count = int(max_count) if max_count else min_count + if min_count > max_count: + min_count = max_count try: inst_type = \ @@ -133,17 +148,21 @@ class CreateInstanceHelper(object): display_description=name, key_name=key_name, key_data=key_data, - metadata=body['server'].get('metadata', {}), + metadata=server_dict.get('metadata', {}), injected_files=injected_files, admin_password=password, zone_blob=zone_blob, - reservation_id=reservation_id)) + reservation_id=reservation_id, + min_count=min_count, + max_count=max_count)) except quota.QuotaError as error: self._handle_quota_error(error) except exception.ImageNotFound as error: msg = _("Can not find requested image") - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) - + raise exc.HTTPBadRequest(explanation=msg) + except exception.FlavorNotFound as error: + msg = _("Invalid flavorRef provided.") + raise exc.HTTPBadRequest(explanation=msg) # Let the caller deal with unhandled exceptions. def _handle_quota_error(self, error): @@ -266,7 +285,7 @@ class CreateInstanceHelper(object): return password -class ServerXMLDeserializer(wsgi.XMLDeserializer): +class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer): """ Deserializer to handle xml-formatted server create requests. 
@@ -278,16 +297,17 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer): """Deserialize an xml-formatted server create request""" dom = minidom.parseString(string) server = self._extract_server(dom) - return {'server': server} + return {'body': {'server': server}} def _extract_server(self, node): """Marshal the server attribute of a parsed request""" server = {} - server_node = self._find_first_child_named(node, 'server') + server_node = self.find_first_child_named(node, 'server') for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]: if server_node.getAttribute(attr): server[attr] = server_node.getAttribute(attr) - metadata = self._extract_metadata(server_node) + metadata_node = self.find_first_child_named(server_node, "metadata") + metadata = self.extract_metadata(metadata_node) if metadata is not None: server["metadata"] = metadata personality = self._extract_personality(server_node) @@ -295,49 +315,17 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer): server["personality"] = personality return server - def _extract_metadata(self, server_node): - """Marshal the metadata attribute of a parsed request""" - metadata_node = self._find_first_child_named(server_node, "metadata") - if metadata_node is None: - return None - metadata = {} - for meta_node in self._find_children_named(metadata_node, "meta"): - key = meta_node.getAttribute("key") - metadata[key] = self._extract_text(meta_node) - return metadata - def _extract_personality(self, server_node): """Marshal the personality attribute of a parsed request""" personality_node = \ - self._find_first_child_named(server_node, "personality") + self.find_first_child_named(server_node, "personality") if personality_node is None: return None personality = [] - for file_node in self._find_children_named(personality_node, "file"): + for file_node in self.find_children_named(personality_node, "file"): item = {} if file_node.hasAttribute("path"): item["path"] = file_node.getAttribute("path") - item["contents"] 
= self._extract_text(file_node) + item["contents"] = self.extract_text(file_node) personality.append(item) return personality - - def _find_first_child_named(self, parent, name): - """Search a nodes children for the first child with a given name""" - for node in parent.childNodes: - if node.nodeName == name: - return node - return None - - def _find_children_named(self, parent, name): - """Return all of a nodes children who have the given name""" - for node in parent.childNodes: - if node.nodeName == name: - yield node - - def _extract_text(self, node): - """Get the text field contained by the given node""" - if len(node.childNodes) == 1: - child = node.childNodes[0] - if child.nodeType == child.TEXT_NODE: - return child.nodeValue - return "" diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index da06ecd15..cc889703e 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -23,6 +23,7 @@ import sys import routes import webob.dec import webob.exc +from xml.etree import ElementTree from nova import exception from nova import flags @@ -194,7 +195,7 @@ class ExtensionsResource(wsgi.Resource): def show(self, req, id): # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions[id] - return self._translate(ext) + return dict(extension=self._translate(ext)) def delete(self, req, id): raise faults.Fault(webob.exc.HTTPNotFound()) @@ -258,15 +259,18 @@ class ExtensionMiddleware(base_wsgi.Middleware): mapper = routes.Mapper() + serializer = wsgi.ResponseSerializer( + {'application/xml': ExtensionsXMLSerializer()}) # extended resources for resource in ext_mgr.get_resources(): LOG.debug(_('Extended resource: %s'), resource.collection) mapper.resource(resource.collection, resource.collection, - controller=wsgi.Resource(resource.controller), - collection=resource.collection_actions, - member=resource.member_actions, - parent_resource=resource.parent) + 
controller=wsgi.Resource( + resource.controller, serializer=serializer), + collection=resource.collection_actions, + member=resource.member_actions, + parent_resource=resource.parent) # extended actions action_resources = self._action_ext_resources(application, ext_mgr, @@ -462,3 +466,40 @@ class ResourceExtension(object): self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions + + +class ExtensionsXMLSerializer(wsgi.XMLDictSerializer): + + def show(self, ext_dict): + ext = self._create_ext_elem(ext_dict['extension']) + return self._to_xml(ext) + + def index(self, exts_dict): + exts = ElementTree.Element('extensions') + for ext_dict in exts_dict['extensions']: + exts.append(self._create_ext_elem(ext_dict)) + return self._to_xml(exts) + + def _create_ext_elem(self, ext_dict): + """Create an extension xml element from a dict.""" + ext_elem = ElementTree.Element('extension') + ext_elem.set('name', ext_dict['name']) + ext_elem.set('namespace', ext_dict['namespace']) + ext_elem.set('alias', ext_dict['alias']) + ext_elem.set('updated', ext_dict['updated']) + desc = ElementTree.Element('description') + desc.text = ext_dict['description'] + ext_elem.append(desc) + for link in ext_dict.get('links', []): + elem = ElementTree.Element('atom:link') + elem.set('rel', link['rel']) + elem.set('href', link['href']) + elem.set('type', link['type']) + ext_elem.append(elem) + return ext_elem + + def _to_xml(self, root): + """Convert the xml tree object to an xml string.""" + root.set('xmlns', wsgi.XMLNS_V11) + root.set('xmlns:atom', wsgi.XMLNS_ATOM) + return ElementTree.tostring(root, encoding='UTF-8') diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index b9a23c126..1ab45d4f1 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -19,6 +19,7 @@ import webob.dec import webob.exc +from nova.api.openstack import common from nova.api.openstack import wsgi @@ -40,6 +41,7 @@ class 
Fault(webob.exc.HTTPException): def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception + self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): @@ -60,9 +62,13 @@ class Fault(webob.exc.HTTPException): content_type = req.best_match_content_type() + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10), + '1.1': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V11), + }[common.get_version_from_href(req.url)] + serializer = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), + 'application/xml': xml_serializer, 'application/json': wsgi.JSONDictSerializer(), }[content_type] @@ -99,9 +105,13 @@ class OverLimitFault(webob.exc.HTTPException): content_type = request.best_match_content_type() metadata = {"attributes": {"overLimitFault": "code"}} + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10), + '1.1': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V11), + }[common.get_version_from_href(request.url)] + serializer = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), + 'application/xml': xml_serializer, 'application/json': wsgi.JSONDictSerializer(), }[content_type] diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index a21ff6cb2..b4bda68d4 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -16,6 +16,7 @@ # under the License. 
import webob +import xml.dom.minidom as minidom from nova import db from nova import exception @@ -74,19 +75,67 @@ class ControllerV11(Controller): return views.flavors.ViewBuilderV11(base_url) +class FlavorXMLSerializer(wsgi.XMLDictSerializer): + + def __init__(self): + super(FlavorXMLSerializer, self).__init__(xmlns=wsgi.XMLNS_V11) + + def _flavor_to_xml(self, xml_doc, flavor, detailed): + flavor_node = xml_doc.createElement('flavor') + flavor_node.setAttribute('id', str(flavor['id'])) + flavor_node.setAttribute('name', flavor['name']) + + if detailed: + flavor_node.setAttribute('ram', str(flavor['ram'])) + flavor_node.setAttribute('disk', str(flavor['disk'])) + + link_nodes = self._create_link_nodes(xml_doc, flavor['links']) + for link_node in link_nodes: + flavor_node.appendChild(link_node) + return flavor_node + + def _flavors_list_to_xml(self, xml_doc, flavors, detailed): + container_node = xml_doc.createElement('flavors') + + for flavor in flavors: + item_node = self._flavor_to_xml(xml_doc, flavor, detailed) + container_node.appendChild(item_node) + return container_node + + def show(self, flavor_container): + xml_doc = minidom.Document() + flavor = flavor_container['flavor'] + node = self._flavor_to_xml(xml_doc, flavor, True) + return self.to_xml_string(node, True) + + def detail(self, flavors_container): + xml_doc = minidom.Document() + flavors = flavors_container['flavors'] + node = self._flavors_list_to_xml(xml_doc, flavors, True) + return self.to_xml_string(node, True) + + def index(self, flavors_container): + xml_doc = minidom.Document() + flavors = flavors_container['flavors'] + node = self._flavors_list_to_xml(xml_doc, flavors, False) + return self.to_xml_string(node, True) + + def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, }[version]() - xmlns = { - '1.0': wsgi.XMLNS_V10, - '1.1': wsgi.XMLNS_V11, + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10), + '1.1': FlavorXMLSerializer(), 
}[version] - serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns), + body_serializers = { + 'application/xml': xml_serializer, } - return wsgi.Resource(controller, serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(controller, serializer=serializer) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index c0e92f2fc..ee181c924 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -22,7 +22,6 @@ from nova import flags from nova import image from nova import quota from nova import utils -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -62,7 +61,7 @@ class Controller(object): if id in metadata: return {'meta': {id: metadata[id]}} else: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() def create(self, req, image_id, body): context = req.environ['nova.context'] @@ -97,33 +96,67 @@ class Controller(object): self._check_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) + return dict(meta=meta) - return req.body + def update_all(self, req, image_id, body): + context = req.environ['nova.context'] + img = self.image_service.show(context, image_id) + metadata = body.get('metadata', {}) + self._check_quota_limit(context, metadata) + img['properties'] = metadata + self.image_service.update(context, image_id, img, None) + return dict(metadata=metadata) def delete(self, req, image_id, id): context = req.environ['nova.context'] img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id) if not id in metadata: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() metadata.pop(id) img['properties'] = metadata self.image_service.update(context, image_id, img, None) +class ImageMetadataXMLDeserializer(wsgi.MetadataXMLDeserializer): + + def 
_extract_metadata_container(self, datastring): + dom = minidom.parseString(datastring) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + def create(self, datastring): + return self._extract_metadata_container(datastring) + + def update_all(self, datastring): + return self._extract_metadata_container(datastring) + + def update(self, datastring): + dom = minidom.parseString(datastring) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +class HeadersSerializer(wsgi.ResponseHeadersSerializer): + + def delete(self, response, data): + response.status_int = 204 + + class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): - def __init__(self): - xmlns = wsgi.XMLNS_V11 + def __init__(self, xmlns=wsgi.XMLNS_V11): super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns) def _meta_item_to_xml(self, doc, key, value): node = doc.createElement('meta') - node.setAttribute('key', key) - text = doc.createTextNode(value) + doc.appendChild(node) + node.setAttribute('key', '%s' % key) + text = doc.createTextNode('%s' % value) node.appendChild(text) return node - def _meta_list_to_xml(self, xml_doc, meta_items): + def meta_list_to_xml(self, xml_doc, meta_items): container_node = xml_doc.createElement('metadata') for (key, value) in meta_items: item_node = self._meta_item_to_xml(xml_doc, key, value) @@ -133,9 +166,10 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): def _meta_list_to_xml_string(self, metadata_dict): xml_doc = minidom.Document() items = metadata_dict['metadata'].items() - container_node = self._meta_list_to_xml(xml_doc, items) + container_node = self.meta_list_to_xml(xml_doc, items) + xml_doc.appendChild(container_node) self._add_xmlns(container_node) - return container_node.toprettyxml(indent=' ') + return xml_doc.toprettyxml(indent=' ', encoding='UTF-8') def index(self, metadata_dict): return 
self._meta_list_to_xml_string(metadata_dict) @@ -143,12 +177,16 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): def create(self, metadata_dict): return self._meta_list_to_xml_string(metadata_dict) + def update_all(self, metadata_dict): + return self._meta_list_to_xml_string(metadata_dict) + def _meta_item_to_xml_string(self, meta_item_dict): xml_doc = minidom.Document() item_key, item_value = meta_item_dict.items()[0] item_node = self._meta_item_to_xml(xml_doc, item_key, item_value) + xml_doc.appendChild(item_node) self._add_xmlns(item_node) - return item_node.toprettyxml(indent=' ') + return xml_doc.toprettyxml(indent=' ', encoding='UTF-8') def show(self, meta_item_dict): return self._meta_item_to_xml_string(meta_item_dict['meta']) @@ -156,10 +194,21 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): def update(self, meta_item_dict): return self._meta_item_to_xml_string(meta_item_dict['meta']) + def default(self, *args, **kwargs): + return '' + def create_resource(): - serializers = { + headers_serializer = HeadersSerializer() + + body_deserializers = { + 'application/xml': ImageMetadataXMLDeserializer(), + } + + body_serializers = { 'application/xml': ImageMetadataXMLSerializer(), } + serializer = wsgi.ResponseSerializer(body_serializers, headers_serializer) + deserializer = wsgi.RequestDeserializer(body_deserializers) - return wsgi.Resource(Controller(), serializers=serializers) + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 64d003a0f..30e4fd389 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -13,18 +13,20 @@ # License for the specific language governing permissions and limitations # under the License. 
+import urlparse import os.path import webob.exc +from xml.dom import minidom from nova import compute from nova import exception from nova import flags import nova.image from nova import log -from nova import utils from nova.api.openstack import common -from nova.api.openstack import faults +from nova.api.openstack import image_metadata +from nova.api.openstack import servers from nova.api.openstack.views import images as images_view from nova.api.openstack import wsgi @@ -32,7 +34,13 @@ from nova.api.openstack import wsgi LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS -SUPPORTED_FILTERS = ['name', 'status'] +SUPPORTED_FILTERS = { + 'name': 'name', + 'status': 'status', + 'changes-since': 'changes-since', + 'server': 'property-instance_ref', + 'type': 'property-image_type', +} class Controller(object): @@ -59,8 +67,9 @@ class Controller(object): filters = {} for param in req.str_params: if param in SUPPORTED_FILTERS or param.startswith('property-'): - filters[param] = req.str_params.get(param) - + # map filter name or carry through if property-* + filter_name = SUPPORTED_FILTERS.get(param, param) + filters[filter_name] = req.str_params.get(param) return filters def show(self, req, id): @@ -75,7 +84,7 @@ class Controller(object): image = self._image_service.show(context, id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") - raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) + raise webob.exc.HTTPNotFound(explanation=explanation) return dict(image=self.get_builder(req).build(image, detail=True)) @@ -90,31 +99,67 @@ class Controller(object): return webob.exc.HTTPNoContent() def create(self, req, body): - """Snapshot a server instance and save the image. + """Snapshot or backup a server instance and save the image. + + Images now have an `image_type` associated with them, which can be + 'snapshot' or the backup type, like 'daily' or 'weekly'. 
+ + If the image_type is backup-like, then the rotation factor can be + included and that will cause the oldest backups that exceed the + rotation factor to be deleted. :param req: `wsgi.Request` object """ + def get_param(param): + try: + return body["image"][param] + except KeyError: + raise webob.exc.HTTPBadRequest(explanation="Missing required " + "param: %s" % param) + context = req.environ['nova.context'] content_type = req.get_content_type() if not body: raise webob.exc.HTTPBadRequest() + image_type = body["image"].get("image_type", "snapshot") + try: server_id = self._server_id_from_req(req, body) - image_name = body["image"]["name"] except KeyError: raise webob.exc.HTTPBadRequest() + image_name = get_param("name") props = self._get_extra_properties(req, body) - image = self._compute_service.snapshot(context, server_id, - image_name, props) + if image_type == "snapshot": + image = self._compute_service.snapshot( + context, server_id, image_name, + extra_properties=props) + elif image_type == "backup": + # NOTE(sirp): Unlike snapshot, backup is not a customer facing + # API call; rather, it's used by the internal backup scheduler + if not FLAGS.allow_admin_api: + raise webob.exc.HTTPBadRequest( + explanation="Admin API Required") + + backup_type = get_param("backup_type") + rotation = int(get_param("rotation")) + + image = self._compute_service.backup( + context, server_id, image_name, + backup_type, rotation, extra_properties=props) + else: + LOG.error(_("Invalid image_type '%s' passed") % image_type) + raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: " + "%s" % image_type) + return dict(image=self.get_builder(req).build(image, detail=True)) def get_builder(self, request): """Indicates that you must use a Controller subclass.""" - raise NotImplementedError + raise NotImplementedError() def _server_id_from_req(self, req, data): raise NotImplementedError() @@ -208,13 +253,23 @@ class ControllerV11(Controller): msg = _("Expected serverRef 
attribute on server entity.") raise webob.exc.HTTPBadRequest(explanation=msg) - head, tail = os.path.split(server_ref) - - if head and head != os.path.join(req.application_url, 'servers'): + if not server_ref.startswith('http'): + return server_ref + + passed = urlparse.urlparse(server_ref) + expected = urlparse.urlparse(req.application_url) + version = expected.path.split('/')[1] + expected_prefix = "/%s/servers/" % version + _empty, _sep, server_id = passed.path.partition(expected_prefix) + scheme_ok = passed.scheme == expected.scheme + host_ok = passed.hostname == expected.hostname + port_ok = (passed.port == expected.port or + passed.port == FLAGS.osapi_port) + if not (scheme_ok and port_ok and host_ok and server_id): msg = _("serverRef must match request url") raise webob.exc.HTTPBadRequest(explanation=msg) - return tail + return server_id def _get_extra_properties(self, req, data): server_ref = data['image']['serverRef'] @@ -224,17 +279,109 @@ class ControllerV11(Controller): return {'instance_ref': server_ref} +class ImageXMLSerializer(wsgi.XMLDictSerializer): + + xmlns = wsgi.XMLNS_V11 + + def __init__(self): + self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer() + + def _image_to_xml(self, xml_doc, image): + image_node = xml_doc.createElement('image') + image_node.setAttribute('id', str(image['id'])) + image_node.setAttribute('name', image['name']) + link_nodes = self._create_link_nodes(xml_doc, + image['links']) + for link_node in link_nodes: + image_node.appendChild(link_node) + return image_node + + def _image_to_xml_detailed(self, xml_doc, image): + image_node = xml_doc.createElement('image') + self._add_image_attributes(image_node, image) + + if 'server' in image: + server_node = self._create_server_node(xml_doc, image['server']) + image_node.appendChild(server_node) + + metadata = image.get('metadata', {}).items() + if len(metadata) > 0: + metadata_node = self._create_metadata_node(xml_doc, metadata) + 
image_node.appendChild(metadata_node) + + link_nodes = self._create_link_nodes(xml_doc, + image['links']) + for link_node in link_nodes: + image_node.appendChild(link_node) + + return image_node + + def _add_image_attributes(self, node, image): + node.setAttribute('id', str(image['id'])) + node.setAttribute('name', image['name']) + node.setAttribute('created', image['created']) + node.setAttribute('updated', image['updated']) + node.setAttribute('status', image['status']) + if 'progress' in image: + node.setAttribute('progress', str(image['progress'])) + + def _create_metadata_node(self, xml_doc, metadata): + return self.metadata_serializer.meta_list_to_xml(xml_doc, metadata) + + def _create_server_node(self, xml_doc, server): + server_node = xml_doc.createElement('server') + server_node.setAttribute('id', str(server['id'])) + link_nodes = self._create_link_nodes(xml_doc, + server['links']) + for link_node in link_nodes: + server_node.appendChild(link_node) + return server_node + + def _image_list_to_xml(self, xml_doc, images, detailed): + container_node = xml_doc.createElement('images') + if detailed: + image_to_xml = self._image_to_xml_detailed + else: + image_to_xml = self._image_to_xml + + for image in images: + item_node = image_to_xml(xml_doc, image) + container_node.appendChild(item_node) + return container_node + + def index(self, images_dict): + xml_doc = minidom.Document() + node = self._image_list_to_xml(xml_doc, + images_dict['images'], + detailed=False) + return self.to_xml_string(node, True) + + def detail(self, images_dict): + xml_doc = minidom.Document() + node = self._image_list_to_xml(xml_doc, + images_dict['images'], + detailed=True) + return self.to_xml_string(node, True) + + def show(self, image_dict): + xml_doc = minidom.Document() + node = self._image_to_xml_detailed(xml_doc, + image_dict['image']) + return self.to_xml_string(node, True) + + def create(self, image_dict): + xml_doc = minidom.Document() + node = 
self._image_to_xml_detailed(xml_doc, + image_dict['image']) + return self.to_xml_string(node, True) + + def create_resource(version='1.0'): controller = { '1.0': ControllerV10, '1.1': ControllerV11, }[version]() - xmlns = { - '1.0': wsgi.XMLNS_V10, - '1.1': wsgi.XMLNS_V11, - }[version] - metadata = { "attributes": { "image": ["id", "name", "updated", "created", "status", @@ -243,9 +390,15 @@ def create_resource(version='1.0'): }, } - serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, - metadata=metadata), + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10), + '1.1': ImageXMLSerializer(), + }[version] + + body_serializers = { + 'application/xml': xml_serializer, } - return wsgi.Resource(controller, serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(controller, serializer=serializer) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 71646b6d3..a74fae487 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -16,13 +16,14 @@ # under the License. 
import time +from xml.dom import minidom from webob import exc import nova -from nova.api.openstack import faults import nova.api.openstack.views.addresses from nova.api.openstack import wsgi +from nova import db class Controller(object): @@ -30,39 +31,121 @@ class Controller(object): def __init__(self): self.compute_api = nova.compute.API() - self.builder = nova.api.openstack.views.addresses.ViewBuilderV10() def _get_instance(self, req, server_id): try: instance = self.compute_api.get( req.environ['nova.context'], server_id) except nova.exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return instance + def create(self, req, server_id, body): + raise exc.HTTPNotImplemented() + + def delete(self, req, server_id, id): + raise exc.HTTPNotImplemented() + + +class ControllerV10(Controller): + def index(self, req, server_id): instance = self._get_instance(req, server_id) - return {'addresses': self.builder.build(instance)} + builder = nova.api.openstack.views.addresses.ViewBuilderV10() + return {'addresses': builder.build(instance)} - def public(self, req, server_id): + def show(self, req, server_id, id): instance = self._get_instance(req, server_id) - return {'public': self.builder.build_public_parts(instance)} + builder = self._get_view_builder(req) + if id == 'private': + view = builder.build_private_parts(instance) + elif id == 'public': + view = builder.build_public_parts(instance) + else: + msg = _("Only private and public networks available") + raise exc.HTTPNotFound(explanation=msg) - def private(self, req, server_id): - instance = self._get_instance(req, server_id) - return {'private': self.builder.build_private_parts(instance)} + return {id: view} + + def _get_view_builder(self, req): + return nova.api.openstack.views.addresses.ViewBuilderV10() + + +class ControllerV11(Controller): + + def index(self, req, server_id): + context = req.environ['nova.context'] + interfaces = self._get_virtual_interfaces(context, server_id) + 
networks = self._get_view_builder(req).build(interfaces) + return {'addresses': networks} def show(self, req, server_id, id): - return faults.Fault(exc.HTTPNotImplemented()) + context = req.environ['nova.context'] + interfaces = self._get_virtual_interfaces(context, server_id) + network = self._get_view_builder(req).build_network(interfaces, id) - def create(self, req, server_id, body): - return faults.Fault(exc.HTTPNotImplemented()) + if network is None: + msg = _("Instance is not a member of specified network") + raise exc.HTTPNotFound(explanation=msg) - def delete(self, req, server_id, id): - return faults.Fault(exc.HTTPNotImplemented()) + return network + def _get_virtual_interfaces(self, context, server_id): + try: + return db.api.virtual_interface_get_by_instance(context, server_id) + except nova.exception.InstanceNotFound: + msg = _("Instance does not exist") + raise exc.HTTPNotFound(explanation=msg) + + def _get_view_builder(self, req): + return nova.api.openstack.views.addresses.ViewBuilderV11() + + +class IPXMLSerializer(wsgi.XMLDictSerializer): + def __init__(self, xmlns=wsgi.XMLNS_V11): + super(IPXMLSerializer, self).__init__(xmlns=xmlns) + + def _ip_to_xml(self, xml_doc, ip_dict): + ip_node = xml_doc.createElement('ip') + ip_node.setAttribute('addr', ip_dict['addr']) + ip_node.setAttribute('version', str(ip_dict['version'])) + return ip_node + + def _network_to_xml(self, xml_doc, network_id, ip_dicts): + network_node = xml_doc.createElement('network') + network_node.setAttribute('id', network_id) + + for ip_dict in ip_dicts: + ip_node = self._ip_to_xml(xml_doc, ip_dict) + network_node.appendChild(ip_node) + + return network_node + + def networks_to_xml(self, xml_doc, networks_container): + addresses_node = xml_doc.createElement('addresses') + for (network_id, ip_dicts) in networks_container.items(): + network_node = self._network_to_xml(xml_doc, network_id, ip_dicts) + addresses_node.appendChild(network_node) + return addresses_node + + def show(self, 
network_container): + (network_id, ip_dicts) = network_container.items()[0] + xml_doc = minidom.Document() + node = self._network_to_xml(xml_doc, network_id, ip_dicts) + return self.to_xml_string(node, False) + + def index(self, addresses_container): + xml_doc = minidom.Document() + node = self.networks_to_xml(xml_doc, addresses_container['addresses']) + return self.to_xml_string(node, False) + + +def create_resource(version): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() -def create_resource(): metadata = { 'list_collections': { 'public': {'item_name': 'ip', 'item_key': 'addr'}, @@ -70,9 +153,11 @@ def create_resource(): }, } - serializers = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, - xmlns=wsgi.XMLNS_V10), - } + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata=metadata, xmlns=wsgi.XMLNS_V11), + '1.1': IPXMLSerializer(), + }[version] + + serializer = wsgi.ResponseSerializer({'application/xml': xml_serializer}) - return wsgi.Resource(Controller(), serializers=serializers) + return wsgi.Resource(controller, serializer=serializer) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index fede96e33..86afa3b62 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -25,14 +25,15 @@ import re import time import urllib import webob.exc +from xml.dom import minidom from collections import defaultdict from webob.dec import wsgify from nova import quota +from nova import utils from nova import wsgi as base_wsgi -from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import limits as limits_views @@ -76,6 +77,58 @@ class LimitsControllerV11(LimitsController): return limits_views.ViewBuilderV11() +class LimitsXMLSerializer(wsgi.XMLDictSerializer): + + xmlns = wsgi.XMLNS_V11 + + def __init__(self): + pass + + def _create_rates_node(self, xml_doc, rates): + rates_node = 
xml_doc.createElement('rates') + for rate in rates: + rate_node = xml_doc.createElement('rate') + rate_node.setAttribute('uri', rate['uri']) + rate_node.setAttribute('regex', rate['regex']) + + for limit in rate['limit']: + limit_node = xml_doc.createElement('limit') + limit_node.setAttribute('value', str(limit['value'])) + limit_node.setAttribute('verb', limit['verb']) + limit_node.setAttribute('remaining', str(limit['remaining'])) + limit_node.setAttribute('unit', limit['unit']) + limit_node.setAttribute('next-available', + str(limit['next-available'])) + rate_node.appendChild(limit_node) + + rates_node.appendChild(rate_node) + return rates_node + + def _create_absolute_node(self, xml_doc, absolutes): + absolute_node = xml_doc.createElement('absolute') + for key, value in absolutes.iteritems(): + limit_node = xml_doc.createElement('limit') + limit_node.setAttribute('name', key) + limit_node.setAttribute('value', str(value)) + absolute_node.appendChild(limit_node) + return absolute_node + + def _limits_to_xml(self, xml_doc, limits): + limits_node = xml_doc.createElement('limits') + rates_node = self._create_rates_node(xml_doc, limits['rate']) + limits_node.appendChild(rates_node) + + absolute_node = self._create_absolute_node(xml_doc, limits['absolute']) + limits_node.appendChild(absolute_node) + + return limits_node + + def index(self, limits_dict): + xml_doc = minidom.Document() + node = self._limits_to_xml(xml_doc, limits_dict['limits']) + return self.to_xml_string(node, False) + + def create_resource(version='1.0'): controller = { '1.0': LimitsControllerV10, @@ -97,12 +150,18 @@ def create_resource(version='1.0'): }, } - serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, - metadata=metadata), + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(xmlns=xmlns, metadata=metadata), + '1.1': LimitsXMLSerializer(), + }[version] + + body_serializers = { + 'application/xml': xml_serializer, } - return wsgi.Resource(controller, 
serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(controller, serializer=serializer) class Limit(object): @@ -117,6 +176,8 @@ class Limit(object): 60 * 60 * 24: "DAY", } + UNIT_MAP = dict([(v, k) for k, v in UNITS.items()]) + def __init__(self, verb, uri, regex, value, unit): """ Initialize a new `Limit`. @@ -222,16 +283,30 @@ class RateLimitingMiddleware(base_wsgi.Middleware): is stored in memory for this implementation. """ - def __init__(self, application, limits=None): + def __init__(self, application, limits=None, limiter=None, **kwargs): """ Initialize new `RateLimitingMiddleware`, which wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap - @param limits: List of dictionaries describing limits + @param limits: String describing limits + @param limiter: String identifying class for representing limits + + Other parameters are passed to the constructor for the limiter. """ base_wsgi.Middleware.__init__(self, application) - self._limiter = Limiter(limits or DEFAULT_LIMITS) + + # Select the limiter class + if limiter is None: + limiter = Limiter + else: + limiter = utils.import_class(limiter) + + # Parse the limits, if any are provided + if limits is not None: + limits = limiter.parse_limits(limits) + + self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @wsgify(RequestClass=wsgi.Request) def __call__(self, req): @@ -269,7 +344,7 @@ class Limiter(object): Rate-limit checking class which handles limits in memory. """ - def __init__(self, limits): + def __init__(self, limits, **kwargs): """ Initialize the new `Limiter`. 
@@ -278,6 +353,12 @@ class Limiter(object): self.limits = copy.deepcopy(limits) self.levels = defaultdict(lambda: copy.deepcopy(limits)) + # Pick up any per-user limit information + for key, value in kwargs.items(): + if key.startswith('user:'): + username = key[5:] + self.levels[username] = self.parse_limits(value) + def get_limits(self, username=None): """ Return the limits for a given user. @@ -303,6 +384,66 @@ class Limiter(object): return None, None + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. We + # put this in the class so that subclasses can override the + # default limit parsing. + @staticmethod + def parse_limits(limits): + """ + Convert a string into a list of Limit instances. This + implementation expects a semicolon-separated sequence of + parenthesized groups, where each group contains a + comma-separated sequence consisting of HTTP method, + user-readable URI, a URI reg-exp, an integer number of + requests which can be made, and a unit of measure. Valid + values for the latter are "SECOND", "MINUTE", "HOUR", and + "DAY". + + @return: List of Limit instances. 
+ """ + + # Handle empty limit strings + limits = limits.strip() + if not limits: + return [] + + # Split up the limits by semicolon + result = [] + for group in limits.split(';'): + group = group.strip() + if group[:1] != '(' or group[-1:] != ')': + raise ValueError("Limit rules must be surrounded by " + "parentheses") + group = group[1:-1] + + # Extract the Limit arguments + args = [a.strip() for a in group.split(',')] + if len(args) != 5: + raise ValueError("Limit rules must contain the following " + "arguments: verb, uri, regex, value, unit") + + # Pull out the arguments + verb, uri, regex, value, unit = args + + # Upper-case the verb + verb = verb.upper() + + # Convert value--raises ValueError if it's not integer + value = int(value) + + # Convert unit + unit = unit.upper() + if unit not in Limit.UNIT_MAP: + raise ValueError("Invalid units specified") + unit = Limit.UNIT_MAP[unit] + + # Build a limit + result.append(Limit(verb, uri, regex, value, unit)) + + return result + class WsgiLimiter(object): """ @@ -386,3 +527,19 @@ class WsgiLimiterProxy(object): return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None + + # Note: This method gets called before the class is instantiated, + # so this must be either a static method or a class method. It is + # used to develop a list of limits to feed to the constructor. + # This implementation returns an empty list, since all limit + # decisions are made by a remote server. + @staticmethod + def parse_limits(limits): + """ + Ignore a limits string--simply doesn't apply for the limit + proxy. + + @return: Empty list. 
+ """ + + return [] diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index 8a314de22..d4f42bbf5 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -18,7 +18,6 @@ from webob import exc from nova import compute -from nova.api.openstack import faults from nova.api.openstack import wsgi from nova import exception from nova import quota @@ -123,8 +122,10 @@ class Controller(object): def create_resource(): - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index b82a6de19..7bef1d9b2 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -17,15 +17,16 @@ import base64 import traceback from webob import exc +import webob from nova import compute +from nova import db from nova import exception from nova import flags from nova import log as logging from nova import utils from nova.api.openstack import common from nova.api.openstack import create_instance_helper as helper -from nova.api.openstack import faults import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images @@ -62,7 +63,7 @@ class Controller(object): return exc.HTTPBadRequest(explanation=str(err)) return servers - def _get_view_builder(self, req): + def _build_view(self, req, instance, is_detail=False): raise NotImplementedError() def _limit_items(self, items, req): @@ -76,13 +77,19 @@ class Controller(object): builder - the response model builder """ - reservation_id = req.str_GET.get('reservation_id') + query_str = req.str_GET + reservation_id = query_str.get('reservation_id') + project_id = query_str.get('project_id') + fixed_ip = 
query_str.get('fixed_ip') + recurse_zones = utils.bool_from_str(query_str.get('recurse_zones')) instance_list = self.compute_api.get_all( - req.environ['nova.context'], - reservation_id=reservation_id) + req.environ['nova.context'], + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=recurse_zones) limited_list = self._limit_items(instance_list, req) - builder = self._get_view_builder(req) - servers = [builder.build(inst, is_detail)['server'] + servers = [self._build_view(req, inst, is_detail)['server'] for inst in limited_list] return dict(servers=servers) @@ -92,38 +99,25 @@ class Controller(object): try: instance = self.compute_api.routing_get( req.environ['nova.context'], id) - builder = self._get_view_builder(req) - return builder.build(instance, is_detail=True) + return self._build_view(req, instance, is_detail=True) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - - @scheduler_api.redirect_handler - def delete(self, req, id): - """ Destroys a server """ - try: - self.compute_api.delete(req.environ['nova.context'], id) - except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() def create(self, req, body): """ Creates a new server for a given user """ extra_values = None result = None - try: - extra_values, result = self.helper.create_instance( - req, body, self.compute_api.create) - except faults.Fault, f: - return f - - instances = result + extra_values, instances = self.helper.create_instance( + req, body, self.compute_api.create) - (inst, ) = instances + # We can only return 1 instance via the API, if we happen to + # build more than one... instances is a list, so we'll just + # use the first one.. 
+ inst = instances[0] for key in ['instance_type', 'image_ref']: inst[key] = extra_values[key] - builder = self._get_view_builder(req) - server = builder.build(inst, is_detail=True) + server = self._build_view(req, inst, is_detail=True) server['server']['adminPass'] = extra_values['password'] return server @@ -134,7 +128,7 @@ class Controller(object): raise exc.HTTPUnprocessableEntity() if not body: - return faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() ctxt = req.environ['nova.context'] update_dict = {} @@ -149,7 +143,7 @@ class Controller(object): try: self.compute_api.update(ctxt, id, **update_dict) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return exc.HTTPNoContent() @@ -168,12 +162,12 @@ class Controller(object): 'confirmResize': self._action_confirm_resize, 'revertResize': self._action_revert_resize, 'rebuild': self._action_rebuild, - } + 'migrate': self._action_migrate} for key in actions.keys(): if key in body: return actions[key](body, req, id) - return faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def _action_change_password(self, input_dict, req, id): return exc.HTTPNotImplemented() @@ -183,7 +177,7 @@ class Controller(object): self.compute_api.confirm_resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in confirm-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) + raise exc.HTTPBadRequest() return exc.HTTPNoContent() def _action_revert_resize(self, input_dict, req, id): @@ -191,8 +185,8 @@ class Controller(object): self.compute_api.revert_resize(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in revert-resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) def _action_resize(self, input_dict, req, id): return exc.HTTPNotImplemented() @@ -202,15 +196,23 @@ class 
Controller(object): reboot_type = input_dict['reboot']['type'] else: LOG.exception(_("Missing argument 'type' for reboot")) - return faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() try: # TODO(gundlach): pass reboot_type, support soft reboot in # virt driver self.compute_api.reboot(req.environ['nova.context'], id) except Exception, e: LOG.exception(_("Error in reboot %s"), e) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) + + def _action_migrate(self, input_dict, req, id): + try: + self.compute_api.resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in migrate %s"), e) + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def lock(self, req, id): @@ -225,8 +227,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::lock %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def unlock(self, req, id): @@ -241,8 +243,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::unlock %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def get_lock(self, req, id): @@ -256,8 +258,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::get_lock %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def reset_network(self, req, id, body): @@ -271,8 +273,8 @@ 
class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::reset_network %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def inject_network_info(self, req, id, body): @@ -286,8 +288,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::inject_network_info %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def pause(self, req, id, body): @@ -298,8 +300,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::pause %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def unpause(self, req, id, body): @@ -310,8 +312,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("Compute.api::unpause %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def suspend(self, req, id, body): @@ -322,8 +324,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::suspend %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def resume(self, req, id, body): @@ -334,8 +336,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::resume %s"), readable) - return 
faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def rescue(self, req, id): @@ -346,8 +348,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::rescue %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def unrescue(self, req, id): @@ -358,8 +360,8 @@ class Controller(object): except: readable = traceback.format_exc() LOG.exception(_("compute.api::unrescue %s"), readable) - return faults.Fault(exc.HTTPUnprocessableEntity()) - return exc.HTTPAccepted() + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def get_ajax_console(self, req, id): @@ -368,8 +370,8 @@ class Controller(object): self.compute_api.get_ajax_console(req.environ['nova.context'], int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def get_vnc_console(self, req, id): @@ -378,8 +380,8 @@ class Controller(object): self.compute_api.get_vnc_console(req.environ['nova.context'], int(id)) except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + raise exc.HTTPNotFound() + return webob.Response(status_int=202) @scheduler_api.redirect_handler def diagnostics(self, req, id): @@ -404,16 +406,25 @@ class Controller(object): class ControllerV10(Controller): + @scheduler_api.redirect_handler + def delete(self, req, id): + """ Destroys a server """ + try: + self.compute_api.delete(req.environ['nova.context'], id) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + def 
_image_ref_from_req_data(self, data): return data['server']['imageId'] def _flavor_id_from_req_data(self, data): return data['server']['flavorId'] - def _get_view_builder(self, req): - addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV10() - return nova.api.openstack.views.servers.ViewBuilderV10( - addresses_builder) + def _build_view(self, req, instance, is_detail=False): + addresses = nova.api.openstack.views.addresses.ViewBuilderV10() + builder = nova.api.openstack.views.servers.ViewBuilderV10(addresses) + return builder.build(instance, is_detail=is_detail) def _limit_items(self, items, req): return common.limited(items, req) @@ -425,18 +436,14 @@ class ControllerV10(Controller): def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ - try: - if 'resize' in input_dict and 'flavorId' in input_dict['resize']: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing 'flavorId' argument for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) - except Exception, e: - LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing 'flavorId' argument for resize")) + raise exc.HTTPUnprocessableEntity() + return webob.Response(status_int=202) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -447,18 +454,16 @@ class ControllerV10(Controller): except (KeyError, TypeError): msg = _("Could not parse imageId from request.") LOG.debug(msg) - return faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) try: self.compute_api.rebuild(context, 
instance_id, image_id) except exception.BuildInProgress: msg = _("Instance %d is currently being rebuilt.") % instance_id LOG.debug(msg) - return faults.Fault(exc.HTTPConflict(explanation=msg)) + raise exc.HTTPConflict(explanation=msg) - response = exc.HTTPAccepted() - response.empty_body = True - return response + return webob.Response(status_int=202) def _get_server_admin_password(self, server): """ Determine the admin password for a server on creation """ @@ -466,6 +471,15 @@ class ControllerV10(Controller): class ControllerV11(Controller): + + @scheduler_api.redirect_handler + def delete(self, req, id): + """ Destroys a server """ + try: + self.compute_api.delete(req.environ['nova.context'], id) + except exception.NotFound: + raise exc.HTTPNotFound() + def _image_ref_from_req_data(self, data): return data['server']['imageRef'] @@ -473,16 +487,18 @@ class ControllerV11(Controller): href = data['server']['flavorRef'] return common.get_id_from_href(href) - def _get_view_builder(self, req): + def _build_view(self, req, instance, is_detail=False): base_url = req.application_url flavor_builder = nova.api.openstack.views.flavors.ViewBuilderV11( base_url) image_builder = nova.api.openstack.views.images.ViewBuilderV11( base_url) addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV11() - return nova.api.openstack.views.servers.ViewBuilderV11( + builder = nova.api.openstack.views.servers.ViewBuilderV11( addresses_builder, flavor_builder, image_builder, base_url) + return builder.build(instance, is_detail=is_detail) + def _action_change_password(self, input_dict, req, id): context = req.environ['nova.context'] if (not 'changePassword' in input_dict @@ -494,7 +510,7 @@ class ControllerV11(Controller): msg = _("Invalid adminPass") return exc.HTTPBadRequest(explanation=msg) self.compute_api.set_admin_password(context, id, password) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def _limit_items(self, items, req): return 
common.limited_by_marker(items, req) @@ -506,7 +522,7 @@ class ControllerV11(Controller): except AttributeError as ex: msg = _("Unable to parse metadata key/value pairs.") LOG.debug(msg) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) def _decode_personalities(self, personalities): """Decode the Base64-encoded personalities.""" @@ -517,14 +533,14 @@ class ControllerV11(Controller): except (KeyError, TypeError): msg = _("Unable to parse personality path/contents.") LOG.info(msg) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) try: personality["contents"] = base64.b64decode(contents) except TypeError: msg = _("Personality content could not be Base64 decoded.") LOG.info(msg) - raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ @@ -536,11 +552,11 @@ class ControllerV11(Controller): flavor_id) else: LOG.exception(_("Missing 'flavorRef' argument for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) + raise exc.HTTPUnprocessableEntity() except Exception, e: LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -551,7 +567,7 @@ class ControllerV11(Controller): except (KeyError, TypeError): msg = _("Could not parse imageRef from request.") LOG.debug(msg) - return faults.Fault(exc.HTTPBadRequest(explanation=msg)) + raise exc.HTTPBadRequest(explanation=msg) personalities = info["rebuild"].get("personality", []) metadata = info["rebuild"].get("metadata") @@ -567,11 +583,9 @@ class ControllerV11(Controller): except exception.BuildInProgress: msg = _("Instance %d is currently 
being rebuilt.") % instance_id LOG.debug(msg) - return faults.Fault(exc.HTTPConflict(explanation=msg)) + raise exc.HTTPConflict(explanation=msg) - response = exc.HTTPAccepted() - response.empty_body = True - return response + return webob.Response(status_int=202) def get_default_xmlns(self, req): return common.XML_NS_V11 @@ -581,6 +595,12 @@ class ControllerV11(Controller): return self.helper._get_server_admin_password_new_style(server) +class HeadersSerializer(wsgi.ResponseHeadersSerializer): + + def delete(self, response, data): + response.status_int = 204 + + def create_resource(version='1.0'): controller = { '1.0': ControllerV10, @@ -608,14 +628,18 @@ def create_resource(version='1.0'): '1.1': wsgi.XMLNS_V11, }[version] - serializers = { + headers_serializer = HeadersSerializer() + + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, xmlns=xmlns), } - deserializers = { + body_deserializers = { 'application/xml': helper.ServerXMLDeserializer(), } - return wsgi.Resource(controller, serializers=serializers, - deserializers=deserializers) + serializer = wsgi.ResponseSerializer(body_serializers, headers_serializer) + deserializer = wsgi.RequestDeserializer(body_deserializers) + + return wsgi.Resource(controller, deserializer, serializer) diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index 4f11f8dfb..54d0a8334 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -17,36 +17,35 @@ from webob import exc -from nova.api.openstack import faults from nova.api.openstack import wsgi class Controller(object): """ The Shared IP Groups Controller for the Openstack API """ - def index(self, req): + def index(self, req, **kwargs): """ Returns a list of Shared IP Groups for the user """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def show(self, req, id): + def show(self, req, id, **kwargs): """ Shows in-depth information 
on a specific Shared IP Group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def update(self, req, id, body): + def update(self, req, id, **kwargs): """ You can't update a Shared IP Group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def delete(self, req, id): + def delete(self, req, id, **kwargs): """ Deletes a Shared IP Group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def detail(self, req): + def detail(self, req, **kwargs): """ Returns a complete list of Shared IP Groups """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() - def create(self, req, body): + def create(self, req, **kwargs): """ Creates a new Shared IP group """ - raise faults.Fault(exc.HTTPNotImplemented()) + raise exc.HTTPNotImplemented() def create_resource(): diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 50975fc1f..8dd72d559 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -19,7 +19,6 @@ from nova import exception from nova import flags from nova import log as logging from nova.api.openstack import common -from nova.api.openstack import faults from nova.api.openstack import wsgi from nova.auth import manager @@ -69,7 +68,7 @@ class Controller(object): user = None if user is None: - raise faults.Fault(exc.HTTPNotFound()) + raise exc.HTTPNotFound() return dict(user=_translate_keys(user)) @@ -105,8 +104,10 @@ def create_resource(): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index 4c682302f..df7a94b7e 100644 --- a/nova/api/openstack/versions.py +++ 
b/nova/api/openstack/versions.py @@ -15,13 +15,18 @@ # License for the specific language governing permissions and limitations # under the License. +from datetime import datetime import webob import webob.dec +from xml.dom import minidom import nova.api.openstack.views.versions from nova.api.openstack import wsgi +ATOM_XMLNS = "http://www.w3.org/2005/Atom" + + class Versions(wsgi.Resource): def __init__(self): metadata = { @@ -31,11 +36,20 @@ class Versions(wsgi.Resource): } } - serializers = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), + body_serializers = { + 'application/atom+xml': VersionsAtomSerializer(metadata=metadata), + 'application/xml': VersionsXMLSerializer(metadata=metadata), } + serializer = wsgi.ResponseSerializer(body_serializers) - wsgi.Resource.__init__(self, None, serializers=serializers) + supported_content_types = ('application/json', + 'application/xml', + 'application/atom+xml') + deserializer = wsgi.RequestDeserializer( + supported_content_types=supported_content_types) + + wsgi.Resource.__init__(self, None, serializer=serializer, + deserializer=deserializer) def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" @@ -43,13 +57,143 @@ class Versions(wsgi.Resource): { "id": "v1.1", "status": "CURRENT", + #TODO(wwolf) get correct value for these + "updated": "2011-07-18T11:30:00Z", }, { "id": "v1.0", "status": "DEPRECATED", + #TODO(wwolf) get correct value for these + "updated": "2010-10-09T11:30:00Z", }, ] builder = nova.api.openstack.views.versions.get_view_builder(request) versions = [builder.build(version) for version in version_objs] return dict(versions=versions) + + +class VersionsXMLSerializer(wsgi.XMLDictSerializer): + def _versions_to_xml(self, versions): + root = self._xml_doc.createElement('versions') + + for version in versions: + root.appendChild(self._create_version_node(version)) + + return root + + def _create_version_node(self, version): + version_node = 
self._xml_doc.createElement('version') + version_node.setAttribute('id', version['id']) + version_node.setAttribute('status', version['status']) + version_node.setAttribute('updated', version['updated']) + + for link in version['links']: + link_node = self._xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + version_node.appendChild(link_node) + + return version_node + + def default(self, data): + self._xml_doc = minidom.Document() + node = self._versions_to_xml(data['versions']) + + return self.to_xml_string(node) + + +class VersionsAtomSerializer(wsgi.XMLDictSerializer): + def __init__(self, metadata=None, xmlns=None): + if not xmlns: + self.xmlns = ATOM_XMLNS + else: + self.xmlns = xmlns + + def _create_text_elem(self, name, text, type=None): + elem = self._xml_doc.createElement(name) + if type: + elem.setAttribute('type', type) + elem_text = self._xml_doc.createTextNode(text) + elem.appendChild(elem_text) + return elem + + def _get_most_recent_update(self, versions): + recent = None + for version in versions: + updated = datetime.strptime(version['updated'], + '%Y-%m-%dT%H:%M:%SZ') + if not recent: + recent = updated + elif updated > recent: + recent = updated + + return recent.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_base_url(self, link_href): + # Make sure no trailing / + link_href = link_href.rstrip('/') + return link_href.rsplit('/', 1)[0] + '/' + + def _create_meta(self, root, versions): + title = self._create_text_elem('title', 'Available API Versions', + type='text') + # Set this updated to the most recently updated version + recent = self._get_most_recent_update(versions) + updated = self._create_text_elem('updated', recent) + + base_url = self._get_base_url(versions[0]['links'][0]['href']) + id = self._create_text_elem('id', base_url) + link = self._xml_doc.createElement('link') + link.setAttribute('rel', 'self') + link.setAttribute('href', base_url) + + author = 
self._xml_doc.createElement('author') + author_name = self._create_text_elem('name', 'Rackspace') + author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/') + author.appendChild(author_name) + author.appendChild(author_uri) + + root.appendChild(title) + root.appendChild(updated) + root.appendChild(id) + root.appendChild(author) + root.appendChild(link) + + def _create_version_entries(self, root, versions): + for version in versions: + entry = self._xml_doc.createElement('entry') + + id = self._create_text_elem('id', version['links'][0]['href']) + title = self._create_text_elem('title', + 'Version %s' % version['id'], + type='text') + updated = self._create_text_elem('updated', version['updated']) + + entry.appendChild(id) + entry.appendChild(title) + entry.appendChild(updated) + + for link in version['links']: + link_node = self._xml_doc.createElement('link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + entry.appendChild(link_node) + + content = self._create_text_elem('content', + 'Version %s %s (%s)' % + (version['id'], + version['status'], + version['updated']), + type='text') + + entry.appendChild(content) + root.appendChild(entry) + + def default(self, data): + self._xml_doc = minidom.Document() + node = self._xml_doc.createElementNS(self.xmlns, 'feed') + self._create_meta(node, data['versions']) + self._create_version_entries(node, data['versions']) + + return self.to_xml_string(node) diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index 2810cce39..ddbf7a144 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -15,34 +15,75 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import flags from nova import utils from nova.api.openstack import common +FLAGS = flags.FLAGS + class ViewBuilder(object): - ''' Models a server addresses response as a python dictionary.''' + """Models a server addresses response as a python dictionary.""" def build(self, inst): raise NotImplementedError() class ViewBuilderV10(ViewBuilder): + def build(self, inst): private_ips = self.build_private_parts(inst) public_ips = self.build_public_parts(inst) return dict(public=public_ips, private=private_ips) def build_public_parts(self, inst): - return utils.get_from_path(inst, 'fixed_ip/floating_ips/address') + return utils.get_from_path(inst, 'fixed_ips/floating_ips/address') def build_private_parts(self, inst): - return utils.get_from_path(inst, 'fixed_ip/address') + return utils.get_from_path(inst, 'fixed_ips/address') class ViewBuilderV11(ViewBuilder): - def build(self, inst): - private_ips = utils.get_from_path(inst, 'fixed_ip/address') - private_ips = [dict(version=4, addr=a) for a in private_ips] - public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address') - public_ips = [dict(version=4, addr=a) for a in public_ips] - return dict(public=public_ips, private=private_ips) + + def build(self, interfaces): + networks = {} + for interface in interfaces: + network_label = interface['network']['label'] + + if network_label not in networks: + networks[network_label] = [] + + ip_addresses = list(self._extract_ipv4_addresses(interface)) + + if FLAGS.use_ipv6: + ipv6_address = self._extract_ipv6_address(interface) + if ipv6_address is not None: + ip_addresses.append(ipv6_address) + + networks[network_label].extend(ip_addresses) + + return networks + + def build_network(self, interfaces, network_label): + for interface in interfaces: + if interface['network']['label'] == network_label: + ips = list(self._extract_ipv4_addresses(interface)) + ipv6 = self._extract_ipv6_address(interface) + if ipv6 is not None: + ips.append(ipv6) + return {network_label: 
ips} + return None + + def _extract_ipv4_addresses(self, interface): + for fixed_ip in interface['fixed_ips']: + yield self._build_ip_entity(fixed_ip['address'], 4) + for floating_ip in fixed_ip.get('floating_ips', []): + yield self._build_ip_entity(floating_ip['address'], 4) + + def _extract_ipv6_address(self, interface): + fixed_ipv6 = interface.get('fixed_ipv6') + if fixed_ipv6 is not None: + return self._build_ip_entity(fixed_ipv6, 6) + + def _build_ip_entity(self, address, version): + return {'addr': address, 'version': version} diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py index 462890ab2..0403ece1b 100644 --- a/nova/api/openstack/views/flavors.py +++ b/nova/api/openstack/views/flavors.py @@ -71,6 +71,7 @@ class ViewBuilderV11(ViewBuilder): def _build_links(self, flavor_obj): """Generate a container of links that refer to the provided flavor.""" href = self.generate_href(flavor_obj["id"]) + bookmark = self.generate_bookmark(flavor_obj["id"]) links = [ { @@ -79,13 +80,7 @@ class ViewBuilderV11(ViewBuilder): }, { "rel": "bookmark", - "type": "application/json", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": href, + "href": bookmark, }, ] @@ -94,3 +89,10 @@ class ViewBuilderV11(ViewBuilder): def generate_href(self, flavor_id): """Create an url that refers to a specific flavor id.""" return "%s/flavors/%s" % (self.base_url, flavor_id) + + def generate_bookmark(self, flavor_id): + """Create an url that refers to a specific flavor id.""" + return "%s/flavors/%s" % ( + common.remove_version_from_href(self.base_url), + flavor_id, + ) diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index d6a054102..873ce212a 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -17,6 +17,8 @@ import os.path +from nova.api.openstack import common + class ViewBuilder(object): """Base class for generating responses to OpenStack API 
image requests.""" @@ -96,7 +98,20 @@ class ViewBuilderV11(ViewBuilder): def _build_server(self, image, image_obj): try: - image['serverRef'] = image_obj['properties']['instance_ref'] + serverRef = image_obj['properties']['instance_ref'] + image['server'] = { + "id": common.get_id_from_href(serverRef), + "links": [ + { + "rel": "self", + "href": serverRef, + }, + { + "rel": "bookmark", + "href": common.remove_version_from_href(serverRef), + }, + ] + } except KeyError: return @@ -104,20 +119,26 @@ class ViewBuilderV11(ViewBuilder): """Return a standardized image structure for display by the API.""" image = ViewBuilder.build(self, image_obj, detail) href = self.generate_href(image_obj["id"]) + bookmark = self.generate_bookmark(image_obj["id"]) + + image["links"] = [ + { + "rel": "self", + "href": href, + }, + { + "rel": "bookmark", + "href": bookmark, + }, - image["links"] = [{ - "rel": "self", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/json", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": href, - }] + ] + + if detail: + image["metadata"] = image_obj.get("properties", {}) return image + + def generate_bookmark(self, image_id): + """Create an url that refers to a specific flavor id.""" + return os.path.join(common.remove_version_from_href(self._url), + "images", str(image_id)) diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py index 934b4921a..f603d7cb4 100644 --- a/nova/api/openstack/views/limits.py +++ b/nova/api/openstack/views/limits.py @@ -15,9 +15,11 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime import time from nova.api.openstack import common +from nova import utils class ViewBuilder(object): @@ -113,10 +115,12 @@ class ViewBuilderV11(ViewBuilder): return limits def _build_rate_limit(self, rate_limit): + next_avail = \ + datetime.datetime.utcfromtimestamp(rate_limit["resetTime"]) return { "verb": rate_limit["verb"], "value": rate_limit["value"], "remaining": int(rate_limit["remaining"]), "unit": rate_limit["unit"], - "next-available": rate_limit["resetTime"], + "next-available": utils.isotime(at=next_avail), } diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index cbfa5aae7..7131db088 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -77,13 +77,12 @@ class ViewBuilder(object): inst_dict = { 'id': inst['id'], 'name': inst['display_name'], - 'addresses': self.addresses_builder.build(inst), 'status': power_mapping[inst.get('state')]} ctxt = nova.context.get_admin_context() compute_api = nova.compute.API() - if compute_api.has_finished_migration(ctxt, inst['id']): + if compute_api.has_finished_migration(ctxt, inst['uuid']): inst_dict['status'] = 'RESIZE-CONFIRM' # Return the metadata as a dictionary @@ -98,10 +97,15 @@ class ViewBuilder(object): self._build_image(inst_dict, inst) self._build_flavor(inst_dict, inst) + self._build_addresses(inst_dict, inst) inst_dict['uuid'] = inst['uuid'] return dict(server=inst_dict) + def _build_addresses(self, response, inst): + """Return the addresses sub-resource of a server.""" + raise NotImplementedError() + def _build_image(self, response, inst): """Return the image sub-resource of a server.""" raise NotImplementedError() @@ -128,6 +132,9 @@ class ViewBuilderV10(ViewBuilder): if 'instance_type' in dict(inst): response['flavorId'] = inst['instance_type']['flavorid'] + def _build_addresses(self, response, inst): + response['addresses'] = self.addresses_builder.build(inst) + class ViewBuilderV11(ViewBuilder): """Model an 
Openstack API V1.0 server response.""" @@ -151,11 +158,16 @@ class ViewBuilderV11(ViewBuilder): flavor_ref = self.flavor_builder.generate_href(flavor_id) response["flavorRef"] = flavor_ref + def _build_addresses(self, response, inst): + interfaces = inst.get('virtual_interfaces', []) + response['addresses'] = self.addresses_builder.build(interfaces) + def _build_extra(self, response, inst): self._build_links(response, inst) def _build_links(self, response, inst): href = self.generate_href(inst["id"]) + bookmark = self.generate_bookmark(inst["id"]) links = [ { @@ -164,13 +176,7 @@ class ViewBuilderV11(ViewBuilder): }, { "rel": "bookmark", - "type": "application/json", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": href, + "href": bookmark, }, ] @@ -179,3 +185,8 @@ class ViewBuilderV11(ViewBuilder): def generate_href(self, server_id): """Create an url that refers to a specific server id.""" return os.path.join(self.base_url, "servers", str(server_id)) + + def generate_bookmark(self, server_id): + """Create an url that refers to a specific flavor id.""" + return os.path.join(common.remove_version_from_href(self.base_url), + "servers", str(server_id)) diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py index d0145c94a..9fa8f49dc 100644 --- a/nova/api/openstack/views/versions.py +++ b/nova/api/openstack/views/versions.py @@ -36,6 +36,7 @@ class ViewBuilder(object): version = { "id": version_data["id"], "status": version_data["status"], + "updated": version_data["updated"], "links": self._build_links(version_data), } @@ -56,4 +57,4 @@ class ViewBuilder(object): def generate_href(self, version_number): """Create an url that refers to a specific version_number.""" - return os.path.join(self.base_url, version_number) + return os.path.join(self.base_url, version_number) + '/' diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 5b6e3cb1d..a28443d12 100644 --- 
a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -13,6 +13,7 @@ from nova import wsgi XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' LOG = logging.getLogger('nova.api.openstack.wsgi') @@ -20,21 +21,22 @@ LOG = logging.getLogger('nova.api.openstack.wsgi') class Request(webob.Request): """Add some Openstack API-specific logic to the base webob.Request.""" - def best_match_content_type(self): + def best_match_content_type(self, supported_content_types=None): """Determine the requested response content-type. Based on the query extension then the Accept header. """ - supported = ('application/json', 'application/xml') + supported_content_types = supported_content_types or \ + ('application/json', 'application/xml') parts = self.path.rsplit('.', 1) if len(parts) > 1: ctype = 'application/{0}'.format(parts[1]) - if ctype in supported: + if ctype in supported_content_types: return ctype - bm = self.accept.best_match(supported) + bm = self.accept.best_match(supported_content_types) # default to application/json if we don't find a preference return bm or 'application/json' @@ -46,38 +48,51 @@ class Request(webob.Request): """ if not "Content-Type" in self.headers: - raise exception.InvalidContentType(content_type=None) + return None allowed_types = ("application/xml", "application/json") content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type=content_type) - else: - return content_type + return content_type -class TextDeserializer(object): - """Custom request body deserialization based on controller action name.""" - def deserialize(self, datastring, action='default'): - """Find local deserialization method and parse request body.""" +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and 
call local method.""" + action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) - return action_method(datastring) + return action_method(*args, **kwargs) - def default(self, datastring): - """Default deserialization code should live here""" + def default(self, data): raise NotImplementedError() -class JSONDeserializer(TextDeserializer): +class TextDeserializer(ActionDispatcher): + """Default request body deserialization""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): try: return utils.loads(datastring) except ValueError: - raise exception.MalformedRequestBody( - reason=_("malformed JSON in request body")) + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} class XMLDeserializer(TextDeserializer): @@ -90,15 +105,15 @@ class XMLDeserializer(TextDeserializer): super(XMLDeserializer, self).__init__() self.metadata = metadata or {} - def default(self, datastring): + def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) try: node = minidom.parseString(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} except expat.ExpatError: - raise exception.MalformedRequestBody( - reason=_("malformed XML in request body")) + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. 
@@ -121,21 +136,71 @@ class XMLDeserializer(TextDeserializer): listnames) return result + def find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name""" + for node in parent.childNodes: + if node.nodeName == name: + return node + return None + + def find_children_named(self, parent, name): + """Return all of a nodes children who have the given name""" + for node in parent.childNodes: + if node.nodeName == name: + yield node + + def extract_text(self, node): + """Get the text field contained by the given node""" + if len(node.childNodes) == 1: + child = node.childNodes[0] + if child.nodeType == child.TEXT_NODE: + return child.nodeValue + return "" + + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + +class MetadataXMLDeserializer(XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + if metadata_node is None: + return None + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + +class RequestHeadersDeserializer(ActionDispatcher): + """Default request headers deserializer""" + + def deserialize(self, request, action): + return self.dispatch(request, action=action) + + def default(self, request): + return {} + class RequestDeserializer(object): """Break up a Request object into more useful pieces.""" - def __init__(self, deserializers=None): - """ - :param deserializers: dictionary of content-type-specific deserializers + def __init__(self, body_deserializers=None, headers_deserializer=None, + supported_content_types=None): - """ - self.deserializers = { + self.supported_content_types = supported_content_types or \ + ('application/json', 'application/xml') + + self.body_deserializers = { 'application/xml': XMLDeserializer(), 'application/json': JSONDeserializer(), } + 
self.body_deserializers.update(body_deserializers or {}) - self.deserializers.update(deserializers or {}) + self.headers_deserializer = headers_deserializer or \ + RequestHeadersDeserializer() def deserialize(self, request): """Extract necessary pieces of the request. @@ -149,31 +214,47 @@ class RequestDeserializer(object): action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) - if request.method.lower() in ('post', 'put'): - if len(request.body) == 0: - action_args['body'] = None - else: - content_type = request.get_content_type() - deserializer = self.get_deserializer(content_type) - - try: - body = deserializer.deserialize(request.body, action) - action_args['body'] = body - except exception.InvalidContentType: - action_args['body'] = None + action_args.update(self.deserialize_headers(request, action)) + action_args.update(self.deserialize_body(request, action)) accept = self.get_expected_content_type(request) return (action, action_args, accept) - def get_deserializer(self, content_type): + def deserialize_headers(self, request, action): + return self.headers_deserializer.deserialize(request, action) + + def deserialize_body(self, request, action): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return {} + + if content_type is None: + LOG.debug(_("No Content-Type provided in request")) + return {} + + if not len(request.body) > 0: + LOG.debug(_("Empty body provided in request")) + return {} + + try: + deserializer = self.get_body_deserializer(content_type) + except exception.InvalidContentType: + LOG.debug(_("Unable to deserialize body as provided Content-Type")) + raise + + return deserializer.deserialize(request.body, action) + + def get_body_deserializer(self, content_type): try: - return self.deserializers[content_type] + return self.body_deserializers[content_type] except (KeyError, TypeError): raise 
exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): - return request.best_match_content_type() + return request.best_match_content_type(self.supported_content_types) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" @@ -195,20 +276,18 @@ class RequestDeserializer(object): return args -class DictSerializer(object): - """Custom response body serialization based on controller action name.""" +class DictSerializer(ActionDispatcher): + """Default request body serialization""" def serialize(self, data, action='default'): - """Find local serialization method and encode response body.""" - action_method = getattr(self, str(action), self.default) - return action_method(data) + return self.dispatch(data, action=action) def default(self, data): - """Default serialization code should live here""" - raise NotImplementedError() + return "" class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization""" def default(self, data): return utils.dumps(data) @@ -232,13 +311,21 @@ class XMLDictSerializer(DictSerializer): doc = minidom.Document() node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) - self._add_xmlns(node) + return self.to_xml_string(node) - return node.toprettyxml(indent=' ', encoding='utf-8') + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toprettyxml(indent=' ', encoding='UTF-8') - def _add_xmlns(self, node): + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, has_atom=False): if self.xmlns is not None: node.setAttribute('xmlns', self.xmlns) + if has_atom: + node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") def _to_xml_node(self, doc, 
metadata, nodename, data): """Recursive method to convert data members to XML nodes.""" @@ -294,20 +381,38 @@ class XMLDictSerializer(DictSerializer): result.appendChild(node) return result + def _create_link_nodes(self, xml_doc, links): + link_nodes = [] + for link in links: + link_node = xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + link_nodes.append(link_node) + return link_nodes + + +class ResponseHeadersSerializer(ActionDispatcher): + """Default response headers serialization""" + + def serialize(self, response, data, action): + self.dispatch(response, data, action=action) + + def default(self, response, data): + response.status_int = 200 + class ResponseSerializer(object): """Encode the necessary pieces into a response object""" - def __init__(self, serializers=None): - """ - :param serializers: dictionary of content-type-specific serializers - - """ - self.serializers = { + def __init__(self, body_serializers=None, headers_serializer=None): + self.body_serializers = { 'application/xml': XMLDictSerializer(), 'application/json': JSONDictSerializer(), } - self.serializers.update(serializers or {}) + self.body_serializers.update(body_serializers or {}) + + self.headers_serializer = headers_serializer or \ + ResponseHeadersSerializer() def serialize(self, response_data, content_type, action='default'): """Serialize a dict into a string and wrap in a wsgi.Request object. 
@@ -317,16 +422,22 @@ class ResponseSerializer(object): """ response = webob.Response() - response.headers['Content-Type'] = content_type + self.serialize_headers(response, response_data, action) + self.serialize_body(response, response_data, content_type, action) + return response - serializer = self.get_serializer(content_type) - response.body = serializer.serialize(response_data, action) + def serialize_headers(self, response, data, action): + self.headers_serializer.serialize(response, data, action) - return response + def serialize_body(self, response, data, content_type, action): + response.headers['Content-Type'] = content_type + if data is not None: + serializer = self.get_body_serializer(content_type) + response.body = serializer.serialize(data, action) - def get_serializer(self, content_type): + def get_body_serializer(self, content_type): try: - return self.serializers[content_type] + return self.body_serializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -343,16 +454,19 @@ class Resource(wsgi.Application): serialized by requested content type. 
""" - def __init__(self, controller, serializers=None, deserializers=None): + + def __init__(self, controller, deserializer=None, serializer=None): """ :param controller: object that implement methods created by routes lib - :param serializers: dict of content-type specific text serializers - :param deserializers: dict of content-type specific text deserializers + :param deserializer: object that can serialize the output of a + controller into a webob response + :param serializer: object that can deserialize a webob request + into necessary pieces """ self.controller = controller - self.serializer = ResponseSerializer(serializers) - self.deserializer = RequestDeserializer(deserializers) + self.deserializer = deserializer or RequestDeserializer() + self.serializer = serializer or ResponseSerializer() @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): @@ -362,20 +476,24 @@ class Resource(wsgi.Application): "url": request.url}) try: - action, action_args, accept = self.deserializer.deserialize( - request) + action, args, accept = self.deserializer.deserialize(request) except exception.InvalidContentType: msg = _("Unsupported Content-Type") - return webob.exc.HTTPBadRequest(explanation=msg) + return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg)) - action_result = self.dispatch(request, action, action_args) - - #TODO(bcwaldon): find a more elegant way to pass through non-dict types - if type(action_result) is dict: - response = self.serializer.serialize(action_result, accept, action) + try: + action_result = self.dispatch(request, action, args) + except webob.exc.HTTPException as ex: + LOG.info(_("HTTP exception thrown: %s"), unicode(ex)) + action_result = faults.Fault(ex) + + if type(action_result) is dict or action_result is None: + response = self.serializer.serialize(action_result, + accept, + 
action=action) else: response = action_result @@ -394,4 +512,8 @@ class Resource(wsgi.Application): """Find action-spefic method on controller and call it.""" controller_method = getattr(self.controller, action) - return controller_method(req=request, **action_args) + try: + return controller_method(req=request, **action_args) + except TypeError, exc: + LOG.debug(str(exc)) + return webob.exc.HTTPBadRequest() diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 8864f825b..f7fd87bcd 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -27,7 +27,6 @@ from nova.scheduler import api from nova.api.openstack import create_instance_helper as helper from nova.api.openstack import common -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -127,11 +126,8 @@ class Controller(object): Returns a reservation ID (a UUID). """ result = None - try: - extra_values, result = self.helper.create_instance(req, body, - self.compute_api.create_all_at_once) - except faults.Fault, f: - return f + extra_values, result = self.helper.create_instance(req, body, + self.compute_api.create_all_at_once) reservation_id = result return {'reservation_id': reservation_id} @@ -196,14 +192,15 @@ def create_resource(version): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, metadata=metadata), } + serializer = wsgi.ResponseSerializer(body_serializers) - deserializers = { + body_deserializers = { 'application/xml': helper.ServerXMLDeserializer(), } + deserializer = wsgi.RequestDeserializer(body_deserializers) - return wsgi.Resource(controller, serializers=serializers, - deserializers=deserializers) + return wsgi.Resource(controller, deserializer, serializer) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 98c7dd263..b6131fb7f 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -630,13 +630,17 @@ class AuthManager(object): not been allocated for 
user. """ - network_ref = db.project_get_network(context.get_admin_context(), - Project.safe_id(project), False) - - if not network_ref: + networks = db.project_get_networks(context.get_admin_context(), + Project.safe_id(project), False) + if not networks: return (None, None) - return (network_ref['vpn_public_address'], - network_ref['vpn_public_port']) + + # TODO(tr3buchet): not sure what you guys plan on doing with this + # but it's possible for a project to have multiple sets of vpn data + # for now I'm just returning the first one + network = networks[0] + return (network['vpn_public_address'], + network['vpn_public_port']) def delete_project(self, project): """Deletes a project""" diff --git a/nova/compute/api.py b/nova/compute/api.py index af18741b6..c49c0d95c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -32,6 +32,7 @@ from nova import quota from nova import rpc from nova import utils from nova import volume +from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.compute import power_state from nova.compute.utils import terminate_volumes @@ -48,9 +49,27 @@ flags.DEFINE_integer('find_host_timeout', 30, 'Timeout after NN seconds when looking for a host.') -def generate_default_hostname(instance_id): +def generate_default_hostname(instance): """Default function to generate a hostname given an instance reference.""" - return str(instance_id) + display_name = instance['display_name'] + if display_name is None: + return 'server_%d' % (instance['id'],) + table = '' + deletions = '' + for i in xrange(256): + c = chr(i) + if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'): + table += c + elif c == ' ': + table += '_' + elif ('A' <= c <= 'Z'): + table += c.lower() + else: + table += '\0' + deletions += c + if isinstance(display_name, unicode): + display_name = display_name.encode('latin-1', 'ignore') + return display_name.translate(table, deletions) def _is_able_to_shutdown(instance, instance_id): @@ -83,23 +102,6 @@ 
class API(base.Base): self.hostname_factory = hostname_factory super(API, self).__init__(**kwargs) - def get_network_topic(self, context, instance_id): - """Get the network topic for an instance.""" - try: - instance = self.get(context, instance_id) - except exception.NotFound: - LOG.warning(_("Instance %d was not found in get_network_topic"), - instance_id) - raise - - host = instance['host'] - if not host: - raise exception.Error(_("Instance %d has no host") % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - def _check_injected_file_quota(self, context, injected_files): """Enforce quota limits on injected files. @@ -125,7 +127,7 @@ class API(base.Base): quota_metadata = quota.allowed_metadata_items(context, num_metadata) if quota_metadata < num_metadata: pid = context.project_id - msg = _("Quota exceeeded for %(pid)s, tried to set " + msg = _("Quota exceeded for %(pid)s, tried to set " "%(num_metadata)s metadata properties") % locals() LOG.warn(msg) raise quota.QuotaError(msg, "MetadataLimitExceeded") @@ -136,14 +138,14 @@ class API(base.Base): for k, v in metadata.iteritems(): if len(k) > 255 or len(v) > 255: pid = context.project_id - msg = _("Quota exceeeded for %(pid)s, metadata property " + msg = _("Quota exceeded for %(pid)s, metadata property " "key or value too long") % locals() LOG.warn(msg) raise quota.QuotaError(msg, "MetadataLimitExceeded") def _check_create_parameters(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, @@ -154,12 +156,16 @@ class API(base.Base): if not instance_type: instance_type = instance_types.get_default_instance_type() + if not min_count: + min_count = 1 + if not max_count: 
+ max_count = min_count num_instances = quota.allowed_instances(context, max_count, instance_type) if num_instances < min_count: pid = context.project_id - LOG.warn(_("Quota exceeeded for %(pid)s," + LOG.warn(_("Quota exceeded for %(pid)s," " tried to run %(min_count)s instances") % locals()) if num_instances <= 0: message = _("Instance quota exceeded. You cannot run any " @@ -203,18 +209,7 @@ class API(base.Base): if ramdisk_id: image_service.show(context, ramdisk_id) - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) if key_data is None and key_name: key_pair = db.key_pair_get(context, context.user_id, key_name) @@ -223,6 +218,9 @@ class API(base.Base): if reservation_id is None: reservation_id = utils.generate_uid('r') + root_device_name = ec2utils.properties_root_device_name( + image['properties']) + base_options = { 'reservation_id': reservation_id, 'image_ref': image_href, @@ -247,53 +245,109 @@ class API(base.Base): 'availability_zone': availability_zone, 'os_type': os_type, 'architecture': architecture, - 'vm_mode': vm_mode} + 'vm_mode': vm_mode, + 'root_device_name': root_device_name} - return (num_instances, base_options, security_groups) + return (num_instances, base_options, image) + + def _update_image_block_device_mapping(self, elevated_context, instance_id, + mappings): + """tell vm driver to create ephemeral/swap device at boot time by + updating BlockDeviceMapping + """ + for bdm in ec2utils.mappings_prepend_dev(mappings): + LOG.debug(_("bdm %s"), bdm) + + virtual_name = bdm['virtual'] + if virtual_name == 'ami' or virtual_name == 'root': + continue + + assert (virtual_name == 'swap' or + 
virtual_name.startswith('ephemeral')) + values = { + 'instance_id': instance_id, + 'device_name': bdm['device'], + 'virtual_name': virtual_name, } + self.db.block_device_mapping_update_or_create(elevated_context, + values) + + def _update_block_device_mapping(self, elevated_context, instance_id, + block_device_mapping): + """tell vm driver to attach volume at boot time by updating + BlockDeviceMapping + """ + for bdm in block_device_mapping: + LOG.debug(_('bdm %s'), bdm) + assert 'device_name' in bdm - def create_db_entry_for_new_instance(self, context, base_options, - security_groups, block_device_mapping, num=1): + values = {'instance_id': instance_id} + for key in ('device_name', 'delete_on_termination', 'virtual_name', + 'snapshot_id', 'volume_id', 'volume_size', + 'no_device'): + values[key] = bdm.get(key) + + # NOTE(yamahata): NoDevice eliminates devices defined in image + # files by command line option. + # (--block-device-mapping) + if bdm.get('virtual_name') == 'NoDevice': + values['no_device'] = True + for k in ('delete_on_termination', 'volume_id', + 'snapshot_id', 'volume_id', 'volume_size', + 'virtual_name'): + values[k] = None + + self.db.block_device_mapping_update_or_create(elevated_context, + values) + + def create_db_entry_for_new_instance(self, context, image, base_options, + security_group, block_device_mapping, num=1): """Create an entry in the DB for this new instance, - including any related table updates (such as security - groups, MAC address, etc). This will called by create() - in the majority of situations, but all-at-once style - Schedulers may initiate the call.""" - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) + including any related table updates (such as security group, + etc). + + This will called by create() in the majority of situations, + but create_all_at_once() style Schedulers may initiate the call. + If you are changing this method, be sure to update both + call paths. 
+ """ + instance = dict(launch_index=num, **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] elevated = context.elevated() - if not security_groups: - security_groups = [] + if security_group is None: + security_group = ['default'] + if not isinstance(security_group, list): + security_group = [security_group] + + security_groups = [] + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + for security_group_id in security_groups: self.db.instance_add_security_group(elevated, instance_id, security_group_id) - # NOTE(yamahata) - # tell vm driver to attach volume at boot time by updating - # BlockDeviceMapping - for bdm in block_device_mapping: - LOG.debug(_('bdm %s'), bdm) - assert 'device_name' in bdm - values = { - 'instance_id': instance_id, - 'device_name': bdm['device_name'], - 'delete_on_termination': bdm.get('delete_on_termination'), - 'virtual_name': bdm.get('virtual_name'), - 'snapshot_id': bdm.get('snapshot_id'), - 'volume_id': bdm.get('volume_id'), - 'volume_size': bdm.get('volume_size'), - 'no_device': bdm.get('no_device')} - self.db.block_device_mapping_create(elevated, values) + # BlockDeviceMapping table + self._update_image_block_device_mapping(elevated, instance_id, + image['properties'].get('mappings', [])) + self._update_block_device_mapping(elevated, instance_id, + image['properties'].get('block_device_mapping', [])) + # override via command line option + self._update_block_device_mapping(elevated, instance_id, + block_device_mapping) # Set sane defaults if not specified - updates = dict(hostname=self.hostname_factory(instance_id)) + updates = {} if (not hasattr(instance, 'display_name') or instance.display_name is None): updates['display_name'] = "Server %s" % instance_id + instance['display_name'] = updates['display_name'] + updates['hostname'] = self.hostname_factory(instance) 
instance = self.update(context, instance_id, **updates) @@ -338,17 +392,16 @@ class API(base.Base): def create_all_at_once(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None): + reservation_id=None, block_device_mapping=None): """Provision the instances by passing the whole request to the Scheduler for execution. Returns a Reservation ID related to the creation of all of these instances.""" - num_instances, base_options, security_groups = \ - self._check_create_parameters( + num_instances, base_options, image = self._check_create_parameters( context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, @@ -368,7 +421,7 @@ class API(base.Base): def create(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, @@ -380,11 +433,13 @@ class API(base.Base): Scheduler drivers, but may remove the effectiveness of the more complicated drivers. + NOTE: If you change this method, be sure to change + create_all_at_once() at the same time! + Returns a list of instance dicts. 
""" - num_instances, base_options, security_groups = \ - self._check_create_parameters( + num_instances, base_options, image = self._check_create_parameters( context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, @@ -398,8 +453,8 @@ class API(base.Base): instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): - instance = self.create_db_entry_for_new_instance(context, - base_options, security_groups, + instance = self.create_db_entry_for_new_instance(context, image, + base_options, security_group, block_device_mapping, num=num) instances.append(instance) instance_id = instance['id'] @@ -412,10 +467,10 @@ class API(base.Base): return [dict(x.iteritems()) for x in instances] - def has_finished_migration(self, context, instance_id): + def has_finished_migration(self, context, instance_uuid): """Returns true if an instance has a finished migration.""" try: - db.migration_get_by_instance_and_status(context, instance_id, + db.migration_get_by_instance_and_status(context, instance_uuid, 'finished') return True except exception.NotFound: @@ -613,50 +668,60 @@ class API(base.Base): """ return self.get(context, instance_id) - def get_all_across_zones(self, context, reservation_id): - """Get all instances with this reservation_id, across - all available Zones (if any). - """ - context = context.elevated() - instances = self.db.instance_get_all_by_reservation( - context, reservation_id) - - children = scheduler_api.call_zone_method(context, "list", - novaclient_collection_name="servers", - reservation_id=reservation_id) - - for zone, servers in children: - for server in servers: - # Results are ready to send to user. No need to scrub. 
- server._info['_is_precooked'] = True - instances.append(server._info) - return instances - def get_all(self, context, project_id=None, reservation_id=None, - fixed_ip=None): + fixed_ip=None, recurse_zones=False): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. """ - if reservation_id is not None: - return self.get_all_across_zones(context, reservation_id) - - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - if project_id or not context.is_admin: + if reservation_id is not None: + recurse_zones = True + instances = self.db.instance_get_all_by_reservation( + context, reservation_id) + elif fixed_ip is not None: + try: + instances = self.db.fixed_ip_get_instance(context, fixed_ip) + except exception.FloatingIpNotFound, e: + if not recurse_zones: + raise + instances = None + elif project_id or not context.is_admin: if not context.project: - return self.db.instance_get_all_by_user( + instances = self.db.instance_get_all_by_user( context, context.user_id) + else: + if project_id is None: + project_id = context.project_id + instances = self.db.instance_get_all_by_project( + context, project_id) + else: + instances = self.db.instance_get_all(context) - if project_id is None: - project_id = context.project_id + if instances is None: + instances = [] + elif not isinstance(instances, list): + instances = [instances] - return self.db.instance_get_all_by_project( - context, project_id) + if not recurse_zones: + return instances - return self.db.instance_get_all(context) + admin_context = context.elevated() + children = scheduler_api.call_zone_method(admin_context, + "list", + novaclient_collection_name="servers", + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=True) + + for zone, servers in children: + for server in servers: + # Results are ready to send to user. No need to scrub. 
+ server._info['_is_precooked'] = True + instances.append(server._info) + return instances def _cast_compute_message(self, method, context, instance_id, host=None, params=None): @@ -690,7 +755,7 @@ class API(base.Base): params = {} if not host: instance = self.get(context, instance_id) - host = instance["host"] + host = instance['host'] queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) params['instance_id'] = instance_id kwargs = {'method': method, 'args': params} @@ -711,19 +776,60 @@ class API(base.Base): raise exception.Error(_("Unable to find host for Instance %s") % instance_id) + def backup(self, context, instance_id, name, backup_type, rotation, + extra_properties=None): + """Backup the given instance + + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: name of the backup or snapshot + name = backup_type # daily backups are called 'daily' + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + :param extra_properties: dict of extra image properties to include + """ + recv_meta = self._create_image(context, instance_id, name, 'backup', + backup_type=backup_type, rotation=rotation, + extra_properties=extra_properties) + return recv_meta + def snapshot(self, context, instance_id, name, extra_properties=None): """Snapshot the given instance. + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: name of the backup or snapshot + :param extra_properties: dict of extra image properties to include + :returns: A dict containing image metadata """ - properties = {'instance_id': str(instance_id), + return self._create_image(context, instance_id, name, 'snapshot', + extra_properties=extra_properties) + + def _create_image(self, context, instance_id, name, image_type, + backup_type=None, rotation=None, extra_properties=None): + """Create snapshot or backup for an instance on this host. 
+ + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: string for name of the snapshot + :param image_type: snapshot | backup + :param backup_type: daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + :param extra_properties: dict of extra image properties to include + + """ + instance = db.api.instance_get(context, instance_id) + properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), - 'image_state': 'creating'} + 'image_state': 'creating', + 'image_type': image_type, + 'backup_type': backup_type} properties.update(extra_properties or {}) sent_meta = {'name': name, 'is_public': False, 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) - params = {'image_id': recv_meta['id']} + params = {'image_id': recv_meta['id'], 'image_type': image_type, + 'backup_type': backup_type, 'rotation': rotation} self._cast_compute_message('snapshot_instance', context, instance_id, params=params) return recv_meta @@ -762,46 +868,68 @@ class API(base.Base): instance_id, params=rebuild_params) + @scheduler_api.reroute_compute("revert_resize") def revert_resize(self, context, instance_id): """Reverts a resize, deleting the 'new' instance in the process.""" context = context.elevated() + instance_ref = self._get_instance(context, instance_id, + 'revert_resize') migration_ref = self.db.migration_get_by_instance_and_status(context, - instance_id, 'finished') + instance_ref['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') params = {'migration_id': migration_ref['id']} - self._cast_compute_message('revert_resize', context, instance_id, - migration_ref['dest_compute'], params=params) + self._cast_compute_message('revert_resize', context, + instance_ref['uuid'], + 
migration_ref['source_compute'], + params=params) + self.db.migration_update(context, migration_ref['id'], {'status': 'reverted'}) + @scheduler_api.reroute_compute("confirm_resize") def confirm_resize(self, context, instance_id): """Confirms a migration/resize and deletes the 'old' instance.""" context = context.elevated() + instance_ref = self._get_instance(context, instance_id, + 'confirm_resize') migration_ref = self.db.migration_get_by_instance_and_status(context, - instance_id, 'finished') + instance_ref['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') - instance_ref = self.db.instance_get(context, instance_id) params = {'migration_id': migration_ref['id']} - self._cast_compute_message('confirm_resize', context, instance_id, - migration_ref['source_compute'], params=params) + self._cast_compute_message('confirm_resize', context, + instance_ref['uuid'], + migration_ref['dest_compute'], + params=params) self.db.migration_update(context, migration_ref['id'], {'status': 'confirmed'}) self.db.instance_update(context, instance_id, {'host': migration_ref['dest_compute'], }) - def resize(self, context, instance_id, flavor_id): - """Resize a running instance.""" - instance = self.db.instance_get(context, instance_id) - current_instance_type = instance['instance_type'] + @scheduler_api.reroute_compute("resize") + def resize(self, context, instance_id, flavor_id=None): + """Resize (ie, migrate) a running instance. + + If flavor_id is None, the process is considered a migration, keeping + the original flavor_id. If flavor_id is not None, the instance should + be migrated to a new host and resized to the new flavor_id. + """ + instance_ref = self._get_instance(context, instance_id, 'resize') + current_instance_type = instance_ref['instance_type'] + + # If flavor_id is not provided, only migrate the instance. + if not flavor_id: + LOG.debug(_("flavor_id is None. 
Assuming migration.")) + new_instance_type = current_instance_type + else: + new_instance_type = self.db.instance_type_get_by_flavor_id( + context, flavor_id) - new_instance_type = self.db.instance_type_get_by_flavor_id( - context, flavor_id) current_instance_type_name = current_instance_type['name'] new_instance_type_name = new_instance_type['name'] LOG.debug(_("Old instance type %(current_instance_type_name)s, " @@ -815,15 +943,40 @@ class API(base.Base): if current_memory_mb > new_memory_mb: raise exception.ApiError(_("Invalid flavor: cannot downsize" "instances")) - if current_memory_mb == new_memory_mb: + + if (current_memory_mb == new_memory_mb) and flavor_id: raise exception.ApiError(_("Invalid flavor: cannot use" "the same flavor. ")) + instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, {"method": "prep_resize", "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id, - "flavor_id": flavor_id}}) + "instance_id": instance_ref['uuid'], + "flavor_id": new_instance_type['id']}}) + + @scheduler_api.reroute_compute("add_fixed_ip") + def add_fixed_ip(self, context, instance_id, network_id): + """Add fixed_ip from specified network to given instance.""" + self._cast_compute_message('add_fixed_ip_to_instance', context, + instance_id, + params=dict(network_id=network_id)) + + @scheduler_api.reroute_compute("remove_fixed_ip") + def remove_fixed_ip(self, context, instance_id, address): + """Remove fixed_ip from specified network to given instance.""" + self._cast_compute_message('remove_fixed_ip_from_instance', context, + instance_id, params=dict(address=address)) + + #TODO(tr3buchet): how to run this in the correct zone? 
+ def add_network_to_project(self, context, project_id): + """Force adds a network to the project.""" + # this will raise if zone doesn't know about project so the decorator + # can catch it and pass it down + self.db.project_get(context, project_id) + + # didn't raise so this is the correct zone + self.network_api.add_network_to_project(context, project_id) @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): @@ -835,6 +988,11 @@ class API(base.Base): """Unpause the given instance.""" self._cast_compute_message('unpause_instance', context, instance_id) + def set_host_enabled(self, context, host, enabled): + """Sets the specified host's ability to accept new instances.""" + return self._call_compute_message("set_host_enabled", context, + instance_id=None, host=host, params={"enabled": enabled}) + @scheduler_api.reroute_compute("diagnostics") def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" @@ -967,11 +1125,34 @@ class API(base.Base): return instance def associate_floating_ip(self, context, instance_id, address): - """Associate a floating ip with an instance.""" + """Makes calls to network_api to associate_floating_ip. + + :param address: is a string floating ip address + """ instance = self.get(context, instance_id) + + # TODO(tr3buchet): currently network_info doesn't contain floating IPs + # in its info, if this changes, the next few lines will need to + # accomodate the info containing floating as well as fixed ip addresses + fixed_ip_addrs = [] + for info in self.network_api.get_instance_nw_info(context, + instance): + ips = info[1]['ips'] + fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips]) + + # TODO(tr3buchet): this will associate the floating IP with the first + # fixed_ip (lowest id) an instance has. This should be changed to + # support specifying a particular fixed_ip if multiple exist. + if not fixed_ip_addrs: + msg = _("instance |%s| has no fixed_ips. 
" + "unable to associate floating ip") % instance_id + raise exception.ApiError(msg) + if len(fixed_ip_addrs) > 1: + LOG.warning(_("multiple fixed_ips exist, using the first: %s"), + fixed_ip_addrs[0]) self.network_api.associate_floating_ip(context, floating_ip=address, - fixed_ip=instance['fixed_ip']) + fixed_ip=fixed_ip_addrs[0]) def get_instance_metadata(self, context, instance_id): """Get all metadata associated with an instance.""" diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 1d246e445..c13a629a9 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -112,7 +112,7 @@ def get_instance_type(id): return get_default_instance_type() try: ctxt = context.get_admin_context() - return db.instance_type_get_by_id(ctxt, id) + return db.instance_type_get(ctxt, id) except exception.DBError: raise exception.ApiError(_("Unknown instance type: %s") % id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5aed2c677..31627fe3b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -46,6 +46,7 @@ from eventlet import greenthread from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import manager from nova import network @@ -53,6 +54,7 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state +from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -75,18 +77,24 @@ flags.DEFINE_integer('live_migration_retry_count', 30, flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." 
" Set to 0 to disable.") -flags.DEFINE_bool('auto_assign_floating_ip', False, - 'Autoassigning floating ip to VM') flags.DEFINE_integer('host_state_interval', 120, 'Interval in seconds for querying the host status') LOG = logging.getLogger('nova.compute.manager') +def publisher_id(host=None): + return notifier.publisher_id("compute", host) + + def checks_instance_lock(function): """Decorator to prevent action against locked instances for non-admins.""" @functools.wraps(function) def decorated_function(self, context, instance_id, *args, **kwargs): + #TODO(anyone): this being called instance_id is forcing a slightly + # confusing convention of pushing instance_uuids + # through an "instance_id" key in the queue args dict when + # casting through the compute API LOG.info(_("check_instance_lock: decorating: |%s|"), function, context=context) LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|" @@ -129,9 +137,9 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.error(_("Unable to load the virtualization driver: %s") % (e)) sys.exit(1) + self.network_api = network.API() self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) - self.network_api = network.API() self._last_host_check = 0 super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) @@ -178,24 +186,10 @@ class ComputeManager(manager.SchedulerDependentManager): FLAGS.console_topic, FLAGS.console_host) - def get_network_topic(self, context, **kwargs): - """Retrieves the network host for a project on this host.""" - # TODO(vish): This method should be memoized. This will make - # the call to get_network_host cheaper, so that - # it can pas messages instead of checking the db - # locally. 
- if FLAGS.stub_network: - host = FLAGS.network_host - else: - host = self.network_manager.get_network_host(context) - return self.db.queue_get_for(context, - FLAGS.network_topic, - host) - def get_console_pool_info(self, context, console_type): return self.driver.get_console_pool_info(console_type) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def refresh_security_group_rules(self, context, security_group_id, **kwargs): """Tell the virtualization driver to refresh security group rules. @@ -205,7 +199,7 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.refresh_security_group_rules(security_group_id) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def refresh_security_group_members(self, context, security_group_id, **kwargs): """Tell the virtualization driver to refresh security group members. @@ -215,11 +209,20 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.refresh_security_group_members(security_group_id) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def refresh_provider_fw_rules(self, context, **_kwargs): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_provider_fw_rules() + def _get_instance_nw_info(self, context, instance): + """Get a list of dictionaries of network data of an instance. 
+ Returns an empty list if stub_network flag is set.""" + network_info = [] + if not FLAGS.stub_network: + network_info = self.network_api.get_instance_nw_info(context, + instance) + return network_info + def _setup_block_device_mapping(self, context, instance_id): """setup volumes for block device mapping""" self.db.instance_set_state(context, @@ -232,6 +235,17 @@ class ComputeManager(manager.SchedulerDependentManager): for bdm in self.db.block_device_mapping_get_all_by_instance( context, instance_id): LOG.debug(_("setting up bdm %s"), bdm) + + if bdm['no_device']: + continue + if bdm['virtual_name']: + # TODO(yamahata): + # block devices for swap and ephemeralN will be + # created by virt driver locally in compute node. + assert (bdm['virtual_name'] == 'swap' or + bdm['virtual_name'].startswith('ephemeral')) + continue + if ((bdm['snapshot_id'] is not None) and (bdm['volume_id'] is None)): # TODO(yamahata): default name and description @@ -264,85 +278,67 @@ class ComputeManager(manager.SchedulerDependentManager): block_device_mapping.append({'device_path': dev_path, 'mount_device': bdm['device_name']}) - elif bdm['virtual_name'] is not None: - # TODO(yamahata): ephemeral/swap device support - LOG.debug(_('block_device_mapping: ' - 'ephemeral device is not supported yet')) - else: - # TODO(yamahata): NoDevice support - assert bdm['no_device'] - LOG.debug(_('block_device_mapping: ' - 'no device is not supported yet')) return block_device_mapping def _run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) - instance_ref.injected_files = kwargs.get('injected_files', []) - instance_ref.admin_pass = kwargs.get('admin_password', None) - if instance_ref['name'] in self.driver.list_instances(): + instance = self.db.instance_get(context, instance_id) + if instance['name'] in self.driver.list_instances(): raise exception.Error(_("Instance 
has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, context=context) - self.db.instance_update(context, - instance_id, - {'host': self.host, 'launched_on': self.host}) - + updates = {} + updates['host'] = self.host + updates['launched_on'] = self.host + # NOTE(vish): used by virt but not in database + updates['injected_files'] = kwargs.get('injected_files', []) + updates['admin_pass'] = kwargs.get('admin_password', None) + instance = self.db.instance_update(context, + instance_id, + updates) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'networking') - is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id) + is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id) try: # NOTE(vish): This could be a cast because we don't do anything # with the address currently, but I'm leaving it as # a call to ensure that network setup completes. We # will eventually also need to save the address here. if not FLAGS.stub_network: - address = rpc.call(context, - self.get_network_topic(context), - {"method": "allocate_fixed_ip", - "args": {"instance_id": instance_id, - "vpn": is_vpn}}) - - self.network_manager.setup_compute_network(context, - instance_id) + network_info = self.network_api.allocate_for_instance(context, + instance, vpn=is_vpn) + LOG.debug(_("instance network_info: |%s|"), network_info) + else: + # TODO(tr3buchet) not really sure how this should be handled. + # virt requires network_info to be passed in but stub_network + # is enabled. 
Setting to [] for now will cause virt to skip + # all vif creation and network injection, maybe this is correct + network_info = [] - block_device_mapping = self._setup_block_device_mapping( - context, - instance_id) + bd_mapping = self._setup_block_device_mapping(context, instance_id) # TODO(vish) check to make sure the availability zone matches self._update_state(context, instance_id, power_state.BUILDING) try: - self.driver.spawn(instance_ref, - block_device_mapping=block_device_mapping) + self.driver.spawn(instance, network_info, bd_mapping) except Exception as ex: # pylint: disable=W0702 msg = _("Instance '%(instance_id)s' failed to spawn. Is " "virtualization enabled in the BIOS? Details: " "%(ex)s") % locals() LOG.exception(msg) - if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip: - public_ip = self.network_api.allocate_floating_ip(context) - - self.db.floating_ip_set_auto_assigned(context, public_ip) - fixed_ip = self.db.fixed_ip_get_by_address(context, address) - floating_ip = self.db.floating_ip_get_by_address(context, - public_ip) - - self.network_api.associate_floating_ip( - context, - floating_ip, - fixed_ip, - affect_auto_assigned=True) - self._update_launched_at(context, instance_id) self._update_state(context, instance_id) + usage_info = utils.usage_from_instance(instance) + notifier.notify('compute.%s' % self.host, + 'compute.instance.create', + notifier.INFO, usage_info) except exception.InstanceNotFound: # FIXME(wwolf): We are just ignoring InstanceNotFound # exceptions here in case the instance was immediately @@ -350,11 +346,11 @@ class ComputeManager(manager.SchedulerDependentManager): # be fixed once we have no-db-messaging pass - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def run_instance(self, context, instance_id, **kwargs): self._run_instance(context, instance_id, **kwargs) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, 
publisher_id=publisher_id()) @checks_instance_lock def start_instance(self, context, instance_id): """Starting an instance on this host.""" @@ -365,74 +361,51 @@ class ComputeManager(manager.SchedulerDependentManager): def _shutdown_instance(self, context, instance_id, action_str): """Shutdown an instance on this host.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) LOG.audit(_("%(action_str)s instance %(instance_id)s") % {'action_str': action_str, 'instance_id': instance_id}, context=context) - fixed_ip = instance_ref.get('fixed_ip') - if not FLAGS.stub_network and fixed_ip: - floating_ips = fixed_ip.get('floating_ips') or [] - for floating_ip in floating_ips: - address = floating_ip['address'] - LOG.debug("Disassociating address %s", address, - context=context) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. - self.network_api.disassociate_floating_ip(context, - address, - True) - if (FLAGS.auto_assign_floating_ip - and floating_ip.get('auto_assigned')): - LOG.debug(_("Deallocating floating ip %s"), - floating_ip['address'], - context=context) - self.network_api.release_floating_ip(context, - address, - True) - - address = fixed_ip['address'] - if address: - LOG.debug(_("Deallocating address %s"), address, - context=context) - # NOTE(vish): Currently, nothing needs to be done on the - # network node until release. If this changes, - # we will need to cast here. 
- self.network_manager.deallocate_fixed_ip(context.elevated(), - address) - - volumes = instance_ref.get('volumes') or [] + network_info = self._get_instance_nw_info(context, instance) + if not FLAGS.stub_network: + self.network_api.deallocate_for_instance(context, instance) + + volumes = instance.get('volumes') or [] for volume in volumes: self._detach_volume(context, instance_id, volume['id'], False) - if (instance_ref['state'] == power_state.SHUTOFF and - instance_ref['state_description'] != 'stopped'): + if (instance['state'] == power_state.SHUTOFF and + instance['state_description'] != 'stopped'): self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) - self.driver.destroy(instance_ref) + self.driver.destroy(instance, network_info) if action_str == 'Terminating': terminate_volumes(self.db, context, instance_id) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def terminate_instance(self, context, instance_id): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') + instance = self.db.instance_get(context.elevated(), instance_id) # TODO(ja): should we keep it in a terminated state for a bit? 
self.db.instance_destroy(context, instance_id) + usage_info = utils.usage_from_instance(instance) + notifier.notify('compute.%s' % self.host, + 'compute.instance.delete', + notifier.INFO, usage_info) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def stop_instance(self, context, instance_id): """Stopping an instance on this host.""" self._shutdown_instance(context, instance_id, 'Stopping') # instance state will be updated to stopped by _poll_instance_states() - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def rebuild_instance(self, context, instance_id, **kwargs): """Destroy and re-make this instance. @@ -451,17 +424,25 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) - self.driver.destroy(instance_ref) + network_info = self._get_instance_nw_info(context, instance_ref) + + self.driver.destroy(instance_ref, network_info) image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) - self.driver.spawn(instance_ref) + self.driver.spawn(instance_ref, network_info) self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) - - @exception.wrap_exception + usage_info = utils.usage_from_instance(instance_ref, + image_ref=image_ref) + notifier.notify('compute.%s' % self.host, + 'compute.instance.rebuild', + notifier.INFO, + usage_info) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this host.""" @@ -482,13 +463,24 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state.NOSTATE, 'rebooting') - 
self.network_manager.setup_compute_network(context, instance_id) - self.driver.reboot(instance_ref) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.reboot(instance_ref, network_info) self._update_state(context, instance_id) - @exception.wrap_exception - def snapshot_instance(self, context, instance_id, image_id): - """Snapshot an instance on this host.""" + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + def snapshot_instance(self, context, instance_id, image_id, + image_type='snapshot', backup_type=None, + rotation=None): + """Snapshot an instance on this host. + + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param image_id: glance.db.sqlalchemy.models.Image.Id + :param image_type: snapshot | backup + :param backup_type: daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -508,7 +500,66 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.snapshot(instance_ref, image_id) - @exception.wrap_exception + if image_type == 'snapshot': + if rotation: + raise exception.ImageRotationNotAllowed() + elif image_type == 'backup': + if rotation: + instance_uuid = instance_ref['uuid'] + self.rotate_backups(context, instance_uuid, backup_type, + rotation) + else: + raise exception.RotationRequiredForBackup() + else: + raise Exception(_('Image type not recognized %s') % image_type) + + def rotate_backups(self, context, instance_uuid, backup_type, rotation): + """Delete excess backups associated to an instance. + + Instances are allowed a fixed number of backups (the rotation number); + this method deletes the oldest backups that exceed the rotation + threshold. 
+ + :param context: security context + :param instance_uuid: string representing uuid of instance + :param backup_type: daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + """ + # NOTE(jk0): Eventually extract this out to the ImageService? + def fetch_images(): + images = [] + marker = None + while True: + batch = image_service.detail(context, filters=filters, + marker=marker, sort_key='created_at', sort_dir='desc') + if not batch: + break + images += batch + marker = batch[-1]['id'] + return images + + image_service = nova.image.get_default_image_service() + filters = {'property-image_type': 'backup', + 'property-backup_type': backup_type, + 'property-instance_uuid': instance_uuid} + + images = fetch_images() + num_images = len(images) + LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)" + % locals())) + if num_images > rotation: + # NOTE(sirp): this deletes all backups that exceed the rotation + # limit + excess = len(images) - rotation + LOG.debug(_("Rotating out %d backups" % excess)) + for i in xrange(excess): + image = images.pop() + image_id = image['id'] + LOG.debug(_("Deleting image %d" % image_id)) + image_service.delete(context, image_id) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def set_admin_password(self, context, instance_id, new_pass=None): """Set the root/admin password for an instance on this host. 
@@ -556,7 +607,7 @@ class ComputeManager(manager.SchedulerDependentManager): time.sleep(1) continue - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def inject_file(self, context, instance_id, path, file_contents): """Write a file to the specified path in an instance on this host.""" @@ -574,7 +625,7 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.audit(msg) self.driver.inject_file(instance_ref, path, file_contents) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def agent_update(self, context, instance_id, url, md5hash): """Update agent running on an instance on this host.""" @@ -592,7 +643,7 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.audit(msg) self.driver.agent_update(instance_ref, url, md5hash) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this host.""" @@ -603,13 +654,13 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state.NOSTATE, 'rescuing') - self.network_manager.setup_compute_network(context, instance_id) _update_state = lambda result: self._update_state_callback( self, context, instance_id, result) - self.driver.rescue(instance_ref, _update_state) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.rescue(instance_ref, _update_state, network_info) self._update_state(context, instance_id) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unrescue_instance(self, context, instance_id): """Rescue an instance on this host.""" @@ -622,7 +673,8 @@ class ComputeManager(manager.SchedulerDependentManager): 'unrescuing') _update_state = lambda result: self._update_state_callback( self, 
context, instance_id, result) - self.driver.unrescue(instance_ref, _update_state) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.unrescue(instance_ref, _update_state, network_info) self._update_state(context, instance_id) @staticmethod @@ -630,15 +682,23 @@ class ComputeManager(manager.SchedulerDependentManager): """Update instance state when async task completes.""" self._update_state(context, instance_id) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def confirm_resize(self, context, instance_id, migration_id): """Destroys the source instance.""" - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) - self.driver.destroy(instance_ref) - - @exception.wrap_exception + migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) + + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.destroy(instance_ref, network_info) + usage_info = utils.usage_from_instance(instance_ref) + notifier.notify('compute.%s' % self.host, + 'compute.instance.resize.confirm', + notifier.INFO, + usage_info) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def revert_resize(self, context, instance_id, migration_id): """Destroys the new instance on the destination machine. @@ -647,20 +707,20 @@ class ComputeManager(manager.SchedulerDependentManager): source machine. 
""" - instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) - self.driver.destroy(instance_ref) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.destroy(instance_ref, network_info) topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) rpc.cast(context, topic, {'method': 'finish_revert_resize', - 'args': { - 'migration_id': migration_ref['id'], - 'instance_id': instance_id, }, + 'args': {'migration_id': migration_ref['id']}, }) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def finish_revert_resize(self, context, instance_id, migration_id): """Finishes the second half of reverting a resize. @@ -669,23 +729,31 @@ class ComputeManager(manager.SchedulerDependentManager): in the database. """ - instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) + instance_type = self.db.instance_type_get_by_flavor_id(context, migration_ref['old_flavor_id']) # Just roll back the record. 
There's no need to resize down since # the 'old' VM already has the preferred attributes - self.db.instance_update(context, instance_id, + self.db.instance_update(context, instance_ref['uuid'], dict(memory_mb=instance_type['memory_mb'], vcpus=instance_type['vcpus'], - local_gb=instance_type['local_gb'])) + local_gb=instance_type['local_gb'], + instance_type_id=instance_type['id'])) self.driver.revert_resize(instance_ref) self.db.migration_update(context, migration_id, {'status': 'reverted'}) + usage_info = utils.usage_from_instance(instance_ref) + notifier.notify('compute.%s' % self.host, + 'compute.instance.resize.revert', + notifier.INFO, + usage_info) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def prep_resize(self, context, instance_id, flavor_id): """Initiates the process of moving a running instance to another host. @@ -694,39 +762,55 @@ class ComputeManager(manager.SchedulerDependentManager): """ context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + + # Because of checks_instance_lock, this must currently be called + # instance_id. 
However, the compute API is always passing the UUID + # of the instance down + instance_ref = self.db.instance_get_by_uuid(context, instance_id) + if instance_ref['host'] == FLAGS.host: raise exception.Error(_( 'Migration error: destination same as source!')) - instance_type = self.db.instance_type_get_by_flavor_id(context, + old_instance_type = self.db.instance_type_get(context, + instance_ref['instance_type_id']) + new_instance_type = self.db.instance_type_get_by_flavor_id(context, flavor_id) + migration_ref = self.db.migration_create(context, - {'instance_id': instance_id, + {'instance_uuid': instance_ref['uuid'], 'source_compute': instance_ref['host'], 'dest_compute': FLAGS.host, 'dest_host': self.driver.get_host_ip_addr(), - 'old_flavor_id': instance_type['flavorid'], + 'old_flavor_id': old_instance_type['flavorid'], 'new_flavor_id': flavor_id, 'status': 'pre-migrating'}) - LOG.audit(_('instance %s: migrating to '), instance_id, + LOG.audit(_('instance %s: migrating'), instance_ref['uuid'], context=context) topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host']) rpc.cast(context, topic, {'method': 'resize_instance', - 'args': { - 'migration_id': migration_ref['id'], - 'instance_id': instance_id, }, - }) - - @exception.wrap_exception + 'args': {'instance_id': instance_ref['uuid'], + 'migration_id': migration_ref['id']}}) + + usage_info = utils.usage_from_instance(instance_ref, + new_instance_type=new_instance_type['name'], + new_instance_type_id=new_instance_type['id']) + notifier.notify('compute.%s' % self.host, + 'compute.instance.resize.prep', + notifier.INFO, + usage_info) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def resize_instance(self, context, instance_id, migration_id): """Starts the migration of a running instance to another host.""" migration_ref = self.db.migration_get(context, migration_id) - instance_ref = self.db.instance_get(context, instance_id) + instance_ref 
= self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) + self.db.migration_update(context, migration_id, {'status': 'migrating'}) @@ -742,12 +826,13 @@ class ComputeManager(manager.SchedulerDependentManager): topic = self.db.queue_get_for(context, FLAGS.compute_topic, migration_ref['dest_compute']) + params = {'migration_id': migration_id, + 'disk_info': disk_info, + 'instance_id': instance_ref['uuid']} rpc.cast(context, topic, {'method': 'finish_resize', - 'args': {'migration_id': migration_id, - 'instance_id': instance_id, - 'disk_info': disk_info}}) + 'args': params}) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def finish_resize(self, context, instance_id, migration_id, disk_info): """Completes the migration process. @@ -757,27 +842,49 @@ class ComputeManager(manager.SchedulerDependentManager): """ migration_ref = self.db.migration_get(context, migration_id) - instance_ref = self.db.instance_get(context, - migration_ref['instance_id']) - # TODO(mdietz): apply the rest of the instance_type attributes going - # after they're supported + instance_ref = self.db.instance_get_by_uuid(context, + migration_ref.instance_uuid) instance_type = self.db.instance_type_get_by_flavor_id(context, migration_ref['new_flavor_id']) - self.db.instance_update(context, instance_id, + self.db.instance_update(context, instance_ref.uuid, dict(instance_type_id=instance_type['id'], memory_mb=instance_type['memory_mb'], vcpus=instance_type['vcpus'], local_gb=instance_type['local_gb'])) - # reload the updated instance ref - # FIXME(mdietz): is there reload functionality? 
- instance_ref = self.db.instance_get(context, instance_id) - self.driver.finish_resize(instance_ref, disk_info) + instance_ref = self.db.instance_get_by_uuid(context, + instance_ref.uuid) + network_info = self._get_instance_nw_info(context, instance_ref) + self.driver.finish_resize(instance_ref, disk_info, network_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + @checks_instance_lock + def add_fixed_ip_to_instance(self, context, instance_id, network_id): + """Calls network_api to add new fixed_ip to instance + then injects the new network info and resets instance networking. + + """ + self.network_api.add_fixed_ip_to_instance(context, instance_id, + network_id) + self.inject_network_info(context, instance_id) + self.reset_network(context, instance_id) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + @checks_instance_lock + def remove_fixed_ip_from_instance(self, context, instance_id, address): + """Calls network_api to remove existing fixed_ip from instance + by injecting the altered network info and resetting + instance networking. 
+ """ + self.network_api.remove_fixed_ip_from_instance(context, instance_id, + address) + self.inject_network_info(context, instance_id) + self.reset_network(context, instance_id) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this host.""" @@ -794,7 +901,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, result)) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this host.""" @@ -811,7 +918,13 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, result)) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + def set_host_enabled(self, context, instance_id=None, host=None, + enabled=None): + """Sets the specified host's ability to accept new instances.""" + return self.driver.set_host_enabled(host, enabled) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for an instance on this host.""" instance_ref = self.db.instance_get(context, instance_id) @@ -820,7 +933,7 @@ class ComputeManager(manager.SchedulerDependentManager): context=context) return self.driver.get_diagnostics(instance_ref) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def suspend_instance(self, context, instance_id): """Suspend the given instance.""" @@ -836,7 +949,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, result)) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def resume_instance(self, context, instance_id): """Resume the given suspended 
instance.""" @@ -852,7 +965,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, result)) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def lock_instance(self, context, instance_id): """Lock the given instance.""" context = context.elevated() @@ -860,7 +973,7 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.debug(_('instance %s: locking'), instance_id, context=context) self.db.instance_update(context, instance_id, {'locked': True}) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def unlock_instance(self, context, instance_id): """Unlock the given instance.""" context = context.elevated() @@ -868,34 +981,39 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.debug(_('instance %s: unlocking'), instance_id, context=context) self.db.instance_update(context, instance_id, {'locked': False}) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def get_lock(self, context, instance_id): """Return the boolean state of the given instance's lock.""" context = context.elevated() LOG.debug(_('instance %s: getting locked state'), instance_id, context=context) - instance_ref = self.db.instance_get(context, instance_id) + if utils.is_uuid_like(instance_id): + uuid = instance_id + instance_ref = self.db.instance_get_by_uuid(context, uuid) + else: + instance_ref = self.db.instance_get(context, instance_id) return instance_ref['locked'] @checks_instance_lock def reset_network(self, context, instance_id): """Reset networking on the given instance.""" - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: reset network'), instance_id, context=context) - self.driver.reset_network(instance_ref) + self.driver.reset_network(instance) @checks_instance_lock def 
inject_network_info(self, context, instance_id): """Inject network info for the given instance.""" - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: inject network info'), instance_id, context=context) - self.driver.inject_network_info(instance_ref) + instance = self.db.instance_get(context, instance_id) + network_info = self._get_instance_nw_info(context, instance) + LOG.debug(_("network_info to inject: |%s|"), network_info) + + self.driver.inject_network_info(instance, network_info) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def get_console_output(self, context, instance_id): """Send the console output for the given instance.""" context = context.elevated() @@ -905,7 +1023,7 @@ class ComputeManager(manager.SchedulerDependentManager): output = self.driver.get_console_output(instance_ref) return output.decode('utf-8', 'replace').encode('ascii', 'replace') - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def get_ajax_console(self, context, instance_id): """Return connection information for an ajax console.""" context = context.elevated() @@ -913,7 +1031,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_ajax_console(instance_ref) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def get_vnc_console(self, context, instance_id): """Return connection information for a vnc console.""" context = context.elevated() @@ -976,7 +1094,7 @@ class ComputeManager(manager.SchedulerDependentManager): return True - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def _detach_volume(self, context, instance_id, volume_id, destroy_bdm): """Detach a volume from an instance.""" @@ -1011,7 
+1129,7 @@ class ComputeManager(manager.SchedulerDependentManager): """ self.volume_manager.remove_compute_volume(context, volume_id) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def compare_cpu(self, context, cpu_info): """Checks that the host cpu is compatible with a cpu given by xml. @@ -1022,7 +1140,7 @@ class ComputeManager(manager.SchedulerDependentManager): """ return self.driver.compare_cpu(cpu_info) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def create_shared_storage_test_file(self, context): """Makes tmpfile under FLAGS.instance_path. @@ -1042,7 +1160,7 @@ class ComputeManager(manager.SchedulerDependentManager): os.close(fd) return os.path.basename(tmp_file) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def check_shared_storage_test_file(self, context, filename): """Confirms existence of the tmpfile under FLAGS.instances_path. @@ -1054,7 +1172,7 @@ class ComputeManager(manager.SchedulerDependentManager): if not os.path.exists(tmp_file): raise exception.FileNotFound(file_path=tmp_file) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def cleanup_shared_storage_test_file(self, context, filename): """Removes existence of the tmpfile under FLAGS.instances_path. @@ -1065,7 +1183,7 @@ class ComputeManager(manager.SchedulerDependentManager): tmp_file = os.path.join(FLAGS.instances_path, filename) os.remove(tmp_file) - @exception.wrap_exception + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def update_available_resource(self, context): """See comments update_resource_info. 
@@ -1087,16 +1205,16 @@ class ComputeManager(manager.SchedulerDependentManager): # Getting instance info instance_ref = self.db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] + hostname = instance_ref['hostname'] # Getting fixed ips - fixed_ip = self.db.instance_get_fixed_address(context, instance_id) - if not fixed_ip: - raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id) + fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id) + if not fixed_ips: + raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) # If any volume is mounted, prepare here. if not instance_ref['volumes']: - LOG.info(_("%s has no volume."), ec2_id) + LOG.info(_("%s has no volume."), hostname) else: for v in instance_ref['volumes']: self.volume_manager.setup_compute_volume(context, v['id']) @@ -1108,18 +1226,18 @@ class ComputeManager(manager.SchedulerDependentManager): # # Retry operation is necessary because continuously request comes, # concorrent request occurs to iptables, then it complains. + network_info = self._get_instance_nw_info(context, instance_ref) max_retry = FLAGS.live_migration_retry_count for cnt in range(max_retry): try: - self.network_manager.setup_compute_network(context, - instance_id) + self.driver.plug_vifs(instance_ref, network_info) break except exception.ProcessExecutionError: if cnt == max_retry - 1: raise else: - LOG.warn(_("setup_compute_network() failed %(cnt)d." - "Retry up to %(max_retry)d for %(ec2_id)s.") + LOG.warn(_("plug_vifs() failed %(cnt)d." + "Retry up to %(max_retry)d for %(hostname)s.") % locals()) time.sleep(1) @@ -1196,8 +1314,9 @@ class ComputeManager(manager.SchedulerDependentManager): # Releasing vlan. # (not necessary in current implementation?) + network_info = self._get_instance_nw_info(ctxt, instance_ref) # Releasing security group ingress rule. - self.driver.unfilter_instance(instance_ref) + self.driver.unfilter_instance(instance_ref, network_info) # Database updating. 
i_name = instance_ref.name @@ -1216,9 +1335,10 @@ class ComputeManager(manager.SchedulerDependentManager): {'host': dest}) except exception.NotFound: LOG.info(_('No floating_ip is found for %s.'), i_name) - except: - LOG.error(_("Live migration: Unexpected error:" - "%s cannot inherit floating ip..") % i_name) + except Exception, e: + LOG.error(_("Live migration: Unexpected error: " + "%(i_name)s cannot inherit floating " + "ip.\n%(e)s") % (locals())) # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) diff --git a/nova/console/manager.py b/nova/console/manager.py index e0db21666..2c823b763 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -56,7 +56,7 @@ class ConsoleProxyManager(manager.Manager): def init_host(self): self.driver.init_host() - @exception.wrap_exception + @exception.wrap_exception() def add_console(self, context, instance_id, password=None, port=None, **kwargs): instance = self.db.instance_get(context, instance_id) @@ -83,7 +83,7 @@ class ConsoleProxyManager(manager.Manager): self.driver.setup_console(context, console) return console['id'] - @exception.wrap_exception + @exception.wrap_exception() def remove_console(self, context, console_id, **_kwargs): try: console = self.db.console_get(context, console_id) diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py index acecc1075..0b5ce4a49 100644 --- a/nova/console/vmrc_manager.py +++ b/nova/console/vmrc_manager.py @@ -77,7 +77,7 @@ class ConsoleVMRCManager(manager.Manager): self.driver.setup_console(context, console) return console - @exception.wrap_exception + @exception.wrap_exception() def add_console(self, context, instance_id, password=None, port=None, **kwargs): """Adds a console for the instance. 
@@ -107,7 +107,7 @@ class ConsoleVMRCManager(manager.Manager): instance) return console['id'] - @exception.wrap_exception + @exception.wrap_exception() def remove_console(self, context, console_id, **_kwargs): """Removes a console entry.""" try: diff --git a/nova/db/api.py b/nova/db/api.py index eea9a349e..47308bdba 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -55,11 +55,6 @@ IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') -class NoMoreAddresses(exception.Error): - """No more available addresses.""" - pass - - class NoMoreBlades(exception.Error): """No more available blades.""" pass @@ -223,17 +218,17 @@ def certificate_update(context, certificate_id, values): ################### -def floating_ip_get(context, floating_ip_id): - return IMPL.floating_ip_get(context, floating_ip_id) +def floating_ip_get(context, id): + return IMPL.floating_ip_get(context, id) -def floating_ip_allocate_address(context, host, project_id): +def floating_ip_allocate_address(context, project_id): """Allocate free floating ip and return the address. Raises if one is not available. 
""" - return IMPL.floating_ip_allocate_address(context, host, project_id) + return IMPL.floating_ip_allocate_address(context, project_id) def floating_ip_create(context, values): @@ -292,11 +287,6 @@ def floating_ip_get_by_address(context, address): return IMPL.floating_ip_get_by_address(context, address) -def floating_ip_get_by_ip(context, ip): - """Get a floating ip by floating address.""" - return IMPL.floating_ip_get_by_ip(context, ip) - - def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) @@ -324,11 +314,12 @@ def migration_get(context, migration_id): return IMPL.migration_get(context, migration_id) -def migration_get_by_instance_and_status(context, instance_id, status): - """Finds a migration by the instance id its migrating.""" - return IMPL.migration_get_by_instance_and_status(context, instance_id, +def migration_get_by_instance_and_status(context, instance_uuid, status): + """Finds a migration by the instance uuid its migrating.""" + return IMPL.migration_get_by_instance_and_status(context, instance_uuid, status) + #################### @@ -341,13 +332,14 @@ def fixed_ip_associate(context, address, instance_id): return IMPL.fixed_ip_associate(context, address, instance_id) -def fixed_ip_associate_pool(context, network_id, instance_id): - """Find free ip in network and associate it to instance. +def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): + """Find free ip in network and associate it to instance or host. Raises if one is not available. 
""" - return IMPL.fixed_ip_associate_pool(context, network_id, instance_id) + return IMPL.fixed_ip_associate_pool(context, network_id, + instance_id, host) def fixed_ip_create(context, values): @@ -370,9 +362,9 @@ def fixed_ip_get_all(context): return IMPL.fixed_ip_get_all(context) -def fixed_ip_get_all_by_host(context, host): - """Get all defined fixed ips used by a host.""" - return IMPL.fixed_ip_get_all_by_host(context, host) +def fixed_ip_get_all_by_instance_host(context, host): + """Get all allocated fixed ips filtered by instance host.""" + return IMPL.fixed_ip_get_all_instance_by_host(context, host) def fixed_ip_get_by_address(context, address): @@ -380,9 +372,19 @@ def fixed_ip_get_by_address(context, address): return IMPL.fixed_ip_get_by_address(context, address) -def fixed_ip_get_all_by_instance(context, instance_id): +def fixed_ip_get_by_instance(context, instance_id): """Get fixed ips by instance or raise if none exist.""" - return IMPL.fixed_ip_get_all_by_instance(context, instance_id) + return IMPL.fixed_ip_get_by_instance(context, instance_id) + + +def fixed_ip_get_by_network_host(context, network_id, host): + """Get fixed ip for a host in a network.""" + return IMPL.fixed_ip_get_by_network_host(context, network_id, host) + + +def fixed_ip_get_by_virtual_interface(context, vif_id): + """Get fixed ips by virtual interface or raise if none exist.""" + return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id) def fixed_ip_get_instance(context, address): @@ -407,6 +409,62 @@ def fixed_ip_update(context, address, values): #################### +def virtual_interface_create(context, values): + """Create a virtual interface record in the database.""" + return IMPL.virtual_interface_create(context, values) + + +def virtual_interface_update(context, vif_id, values): + """Update a virtual interface record in the database.""" + return IMPL.virtual_interface_update(context, vif_id, values) + + +def virtual_interface_get(context, vif_id): + """Gets a virtual 
interface from the table,""" + return IMPL.virtual_interface_get(context, vif_id) + + +def virtual_interface_get_by_address(context, address): + """Gets a virtual interface from the table filtering on address.""" + return IMPL.virtual_interface_get_by_address(context, address) + + +def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): + """Gets the virtual interface fixed_ip is associated with.""" + return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id) + + +def virtual_interface_get_by_instance(context, instance_id): + """Gets all virtual_interfaces for instance.""" + return IMPL.virtual_interface_get_by_instance(context, instance_id) + + +def virtual_interface_get_by_instance_and_network(context, instance_id, + network_id): + """Gets all virtual interfaces for instance.""" + return IMPL.virtual_interface_get_by_instance_and_network(context, + instance_id, + network_id) + + +def virtual_interface_get_by_network(context, network_id): + """Gets all virtual interfaces on network.""" + return IMPL.virtual_interface_get_by_network(context, network_id) + + +def virtual_interface_delete(context, vif_id): + """Delete virtual interface record from the database.""" + return IMPL.virtual_interface_delete(context, vif_id) + + +def virtual_interface_delete_by_instance(context, instance_id): + """Delete virtual interface records associated with instance.""" + return IMPL.virtual_interface_delete_by_instance(context, instance_id) + + +#################### + + def instance_create(context, values): """Create an instance from the values dictionary.""" return IMPL.instance_create(context, values) @@ -442,6 +500,11 @@ def instance_get_all(context): return IMPL.instance_get_all(context) +def instance_get_active_by_window(context, begin, end=None): + """Get instances active during a certain time window.""" + return IMPL.instance_get_active_by_window(context, begin, end) + + def instance_get_all_by_user(context, user_id): """Get all instances.""" return 
IMPL.instance_get_all_by_user(context, user_id) @@ -462,13 +525,13 @@ def instance_get_all_by_reservation(context, reservation_id): return IMPL.instance_get_all_by_reservation(context, reservation_id) -def instance_get_fixed_address(context, instance_id): +def instance_get_fixed_addresses(context, instance_id): """Get the fixed ip address of an instance.""" - return IMPL.instance_get_fixed_address(context, instance_id) + return IMPL.instance_get_fixed_addresses(context, instance_id) -def instance_get_fixed_address_v6(context, instance_id): - return IMPL.instance_get_fixed_address_v6(context, instance_id) +def instance_get_fixed_addresses_v6(context, instance_id): + return IMPL.instance_get_fixed_addresses_v6(context, instance_id) def instance_get_floating_address(context, instance_id): @@ -563,9 +626,9 @@ def key_pair_get_all_by_user(context, user_id): #################### -def network_associate(context, project_id): +def network_associate(context, project_id, force=False): """Associate a free network to a project.""" - return IMPL.network_associate(context, project_id) + return IMPL.network_associate(context, project_id, force) def network_count(context): @@ -658,6 +721,11 @@ def network_get_all_by_instance(context, instance_id): return IMPL.network_get_all_by_instance(context, instance_id) +def network_get_all_by_host(context, host): + """All networks for which the given host is the network host.""" + return IMPL.network_get_all_by_host(context, host) + + def network_get_index(context, network_id): """Get non-conflicting index for network.""" return IMPL.network_get_index(context, network_id) @@ -690,23 +758,6 @@ def network_update(context, network_id, values): ################### -def project_get_network(context, project_id, associate=True): - """Return the network associated with the project. - - If associate is true, it will attempt to associate a new - network if one is not found, otherwise it returns None. 
- - """ - return IMPL.project_get_network(context, project_id, associate) - - -def project_get_network_v6(context, project_id): - return IMPL.project_get_network_v6(context, project_id) - - -################### - - def queue_get_for(context, topic, physical_node_id): """Return a channel to send a message to a node with a topic.""" return IMPL.queue_get_for(context, topic, physical_node_id) @@ -944,10 +995,16 @@ def block_device_mapping_create(context, values): def block_device_mapping_update(context, bdm_id, values): - """Create an entry of block device mapping""" + """Update an entry of block device mapping""" return IMPL.block_device_mapping_update(context, bdm_id, values) +def block_device_mapping_update_or_create(context, values): + """Update an entry of block device mapping. + If not existed, create a new entry""" + return IMPL.block_device_mapping_update_or_create(context, values) + + def block_device_mapping_get_all_by_instance(context, instance_id): """Get all block device mapping belonging to a instance""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) @@ -1130,6 +1187,9 @@ def user_update(context, user_id, values): return IMPL.user_update(context, user_id, values) +################### + + def project_get(context, id): """Get project by id.""" return IMPL.project_get(context, id) @@ -1170,15 +1230,21 @@ def project_delete(context, project_id): return IMPL.project_delete(context, project_id) -################### +def project_get_networks(context, project_id, associate=True): + """Return the network associated with the project. + If associate is true, it will attempt to associate a new + network if one is not found, otherwise it returns None. 
+ + """ + return IMPL.project_get_networks(context, project_id, associate) -def host_get_networks(context, host): - """All networks for which the given host is the network host.""" - return IMPL.host_get_networks(context, host) +def project_get_networks_v6(context, project_id): + return IMPL.project_get_networks_v6(context, project_id) -################## + +################### def console_pool_create(context, values): @@ -1245,9 +1311,9 @@ def instance_type_get_all(context, inactive=False): return IMPL.instance_type_get_all(context, inactive) -def instance_type_get_by_id(context, id): +def instance_type_get(context, id): """Get instance type by id.""" - return IMPL.instance_type_get_by_id(context, id) + return IMPL.instance_type_get(context, id) def instance_type_get_by_name(context, name): @@ -1284,7 +1350,7 @@ def zone_create(context, values): def zone_update(context, zone_id, values): """Update a child Zone entry.""" - return IMPL.zone_update(context, values) + return IMPL.zone_update(context, zone_id, values) def zone_delete(context, zone_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 06bb1501f..ba03cabbc 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -18,7 +18,6 @@ """ Implementation of SQLAlchemy backend. 
""" -import traceback import warnings from nova import db @@ -26,17 +25,18 @@ from nova import exception from nova import flags from nova import ipv6 from nova import utils +from nova import log as logging from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import or_ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all -from sqlalchemy.sql import exists from sqlalchemy.sql import func from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.db.sqlalchemy") def is_admin_context(context): @@ -116,8 +116,23 @@ def require_context(f): return wrapper +def require_instance_exists(f): + """Decorator to require the specified instance to exist. + + Requres the wrapped function to use context and instance_id as + their first two arguments. + """ + + def wrapper(context, instance_id, *args, **kwargs): + db.api.instance_get(context, instance_id) + return f(context, instance_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + ################### + @require_admin_context def service_destroy(context, service_id): session = get_session() @@ -428,6 +443,8 @@ def certificate_update(context, certificate_id, values): ################### + + @require_context def floating_ip_get(context, id): session = get_session() @@ -448,18 +465,17 @@ def floating_ip_get(context, id): filter_by(deleted=False).\ first() if not result: - raise exception.FloatingIpNotFoundForFixedAddress() + raise exception.FloatingIpNotFound(id=id) return result @require_context -def floating_ip_allocate_address(context, host, project_id): +def floating_ip_allocate_address(context, project_id): authorize_project_context(context, project_id) session = get_session() with session.begin(): floating_ip_ref = session.query(models.FloatingIp).\ - filter_by(host=host).\ filter_by(fixed_ip_id=None).\ filter_by(project_id=None).\ 
filter_by(deleted=False).\ @@ -468,7 +484,7 @@ def floating_ip_allocate_address(context, host, project_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not floating_ip_ref: - raise db.NoMoreAddresses() + raise exception.NoMoreFloatingIps() floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) return floating_ip_ref['address'] @@ -486,6 +502,7 @@ def floating_ip_create(context, values): def floating_ip_count_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() + # TODO(tr3buchet): why leave auto_assigned floating IPs out? return session.query(models.FloatingIp).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ @@ -517,6 +534,7 @@ def floating_ip_deallocate(context, address): address, session=session) floating_ip_ref['project_id'] = None + floating_ip_ref['host'] = None floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @@ -565,32 +583,42 @@ def floating_ip_set_auto_assigned(context, address): @require_admin_context def floating_ip_get_all(context): session = get_session() - return session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.instance')).\ - filter_by(deleted=False).\ - all() + floating_ip_refs = session.query(models.FloatingIp).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(deleted=False).\ + all() + if not floating_ip_refs: + raise exception.NoFloatingIpsDefined() + return floating_ip_refs @require_admin_context def floating_ip_get_all_by_host(context, host): session = get_session() - return session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.instance')).\ - filter_by(host=host).\ - filter_by(deleted=False).\ - all() + floating_ip_refs = session.query(models.FloatingIp).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(host=host).\ + filter_by(deleted=False).\ + all() + if not floating_ip_refs: + raise 
exception.FloatingIpNotFoundForHost(host=host) + return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() - return session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.instance')).\ - filter_by(project_id=project_id).\ - filter_by(auto_assigned=False).\ - filter_by(deleted=False).\ - all() + # TODO(tr3buchet): why do we not want auto_assigned floating IPs here? + floating_ip_refs = session.query(models.FloatingIp).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(project_id=project_id).\ + filter_by(auto_assigned=False).\ + filter_by(deleted=False).\ + all() + if not floating_ip_refs: + raise exception.FloatingIpNotFoundForProject(project_id=project_id) + return floating_ip_refs @require_context @@ -600,29 +628,12 @@ def floating_ip_get_by_address(context, address, session=None): session = get_session() result = session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(address=address).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address) - - return result - - -@require_context -def floating_ip_get_by_ip(context, ip, session=None): - if not session: - session = get_session() - - result = session.query(models.FloatingIp).\ - filter_by(address=ip).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - - if not result: - raise exception.FloatingIpNotFound(floating_ip=ip) - + raise exception.FloatingIpNotFoundForAddress(address=address) return result @@ -653,13 +664,13 @@ def fixed_ip_associate(context, address, instance_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: - raise db.NoMoreAddresses() + raise exception.NoMoreFixedIps() fixed_ip_ref.instance = instance 
session.add(fixed_ip_ref) @require_admin_context -def fixed_ip_associate_pool(context, network_id, instance_id): +def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, @@ -669,19 +680,23 @@ def fixed_ip_associate_pool(context, network_id, instance_id): filter_by(reserved=False).\ filter_by(deleted=False).\ filter_by(instance=None).\ + filter_by(host=None).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: - raise db.NoMoreAddresses() + raise exception.NoMoreFixedIps() if not fixed_ip_ref.network: fixed_ip_ref.network = network_get(context, network_id, session=session) - fixed_ip_ref.instance = instance_get(context, - instance_id, - session=session) + if instance_id: + fixed_ip_ref.instance = instance_get(context, + instance_id, + session=session) + if host: + fixed_ip_ref.host = host session.add(fixed_ip_ref) return fixed_ip_ref['address'] @@ -715,9 +730,9 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): filter(models.FixedIp.network_id.in_(inner_q)).\ filter(models.FixedIp.updated_at < time).\ filter(models.FixedIp.instance_id != None).\ - filter_by(allocated=0).\ + filter_by(allocated=False).\ update({'instance_id': None, - 'leased': 0, + 'leased': False, 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @@ -727,25 +742,28 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): def fixed_ip_get_all(context, session=None): if not session: session = get_session() - result = session.query(models.FixedIp).all() + result = session.query(models.FixedIp).\ + options(joinedload('floating_ips')).\ + all() if not result: - raise exception.NoFloatingIpsDefined() + raise exception.NoFixedIpsDefined() return result @require_admin_context -def fixed_ip_get_all_by_host(context, host=None): 
+def fixed_ip_get_all_by_instance_host(context, host=None): session = get_session() result = session.query(models.FixedIp).\ - join(models.FixedIp.instance).\ - filter_by(state=1).\ - filter_by(host=host).\ - all() + options(joinedload('floating_ips')).\ + join(models.FixedIp.instance).\ + filter_by(state=1).\ + filter_by(host=host).\ + all() if not result: - raise exception.NoFloatingIpsDefinedForHost(host=host) + raise exception.FixedIpNotFoundForHost(host=host) return result @@ -757,11 +775,12 @@ def fixed_ip_get_by_address(context, address, session=None): result = session.query(models.FixedIp).\ filter_by(address=address).\ filter_by(deleted=can_read_deleted(context)).\ + options(joinedload('floating_ips')).\ options(joinedload('network')).\ options(joinedload('instance')).\ first() if not result: - raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address) + raise exception.FixedIpNotFoundForAddress(address=address) if is_user_context(context): authorize_project_context(context, result.instance.project_id) @@ -770,30 +789,64 @@ def fixed_ip_get_by_address(context, address, session=None): @require_context -def fixed_ip_get_instance(context, address): - fixed_ip_ref = fixed_ip_get_by_address(context, address) - return fixed_ip_ref.instance +def fixed_ip_get_by_instance(context, instance_id): + session = get_session() + rv = session.query(models.FixedIp).\ + options(joinedload('floating_ips')).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + all() + if not rv: + raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) + return rv @require_context -def fixed_ip_get_all_by_instance(context, instance_id): +def fixed_ip_get_by_network_host(context, network_id, host): session = get_session() rv = session.query(models.FixedIp).\ - filter_by(instance_id=instance_id).\ - filter_by(deleted=False) + filter_by(network_id=network_id).\ + filter_by(host=host).\ + filter_by(deleted=False).\ + first() + if not rv: + raise 
exception.FixedIpNotFoundForNetworkHost(network_id=network_id, + host=host) + return rv + + +@require_context +def fixed_ip_get_by_virtual_interface(context, vif_id): + session = get_session() + rv = session.query(models.FixedIp).\ + options(joinedload('floating_ips')).\ + filter_by(virtual_interface_id=vif_id).\ + filter_by(deleted=False).\ + all() if not rv: - raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id) + raise exception.FixedIpNotFoundForVirtualInterface(vif_id=vif_id) return rv @require_context +def fixed_ip_get_instance(context, address): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + return fixed_ip_ref.instance + + +@require_context def fixed_ip_get_instance_v6(context, address): session = get_session() + + # convert IPv6 address to mac mac = ipv6.to_mac(address) + # get virtual interface + vif_ref = virtual_interface_get_by_address(context, mac) + + # look up instance based on instance_id from vif row result = session.query(models.Instance).\ - filter_by(mac_address=mac).\ - first() + filter_by(id=vif_ref['instance_id']) return result @@ -815,6 +868,164 @@ def fixed_ip_update(context, address, values): ################### + + +@require_context +def virtual_interface_create(context, values): + """Create a new virtual interface record in teh database. + + :param values: = dict containing column values + """ + try: + vif_ref = models.VirtualInterface() + vif_ref.update(values) + vif_ref.save() + except IntegrityError: + raise exception.VirtualInterfaceCreateException() + + return vif_ref + + +@require_context +def virtual_interface_update(context, vif_id, values): + """Update a virtual interface record in the database. 
+ + :param vif_id: = id of virtual interface to update + :param values: = values to update + """ + session = get_session() + with session.begin(): + vif_ref = virtual_interface_get(context, vif_id, session=session) + vif_ref.update(values) + vif_ref.save(session=session) + return vif_ref + + +@require_context +def virtual_interface_get(context, vif_id, session=None): + """Gets a virtual interface from the table. + + :param vif_id: = id of the virtual interface + """ + if not session: + session = get_session() + + vif_ref = session.query(models.VirtualInterface).\ + filter_by(id=vif_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_context +def virtual_interface_get_by_address(context, address): + """Gets a virtual interface from the table. + + :param address: = the address of the interface you're looking to get + """ + session = get_session() + vif_ref = session.query(models.VirtualInterface).\ + filter_by(address=address).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_context +def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): + """Gets the virtual interface fixed_ip is associated with. + + :param fixed_ip_id: = id of the fixed_ip + """ + session = get_session() + vif_ref = session.query(models.VirtualInterface).\ + filter_by(fixed_ip_id=fixed_ip_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_context +@require_instance_exists +def virtual_interface_get_by_instance(context, instance_id): + """Gets all virtual interfaces for instance. 
+ + :param instance_id: = id of the instance to retreive vifs for + """ + session = get_session() + vif_refs = session.query(models.VirtualInterface).\ + filter_by(instance_id=instance_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + all() + return vif_refs + + +@require_context +def virtual_interface_get_by_instance_and_network(context, instance_id, + network_id): + """Gets virtual interface for instance that's associated with network.""" + session = get_session() + vif_ref = session.query(models.VirtualInterface).\ + filter_by(instance_id=instance_id).\ + filter_by(network_id=network_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_admin_context +def virtual_interface_get_by_network(context, network_id): + """Gets all virtual_interface on network. + + :param network_id: = network to retreive vifs for + """ + session = get_session() + vif_refs = session.query(models.VirtualInterface).\ + filter_by(network_id=network_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + all() + return vif_refs + + +@require_context +def virtual_interface_delete(context, vif_id): + """Delete virtual interface record from teh database. + + :param vif_id: = id of vif to delete + """ + session = get_session() + vif_ref = virtual_interface_get(context, vif_id, session) + with session.begin(): + session.delete(vif_ref) + + +@require_context +def virtual_interface_delete_by_instance(context, instance_id): + """Delete virtual interface records that are associated + with the instance given by instance_id. 
+ + :param instance_id: = id of instance + """ + vif_refs = virtual_interface_get_by_instance(context, instance_id) + for vif_ref in vif_refs: + virtual_interface_delete(context, vif_ref['id']) + + +################### + + def _metadata_refs(metadata_dict): metadata_refs = [] if metadata_dict: @@ -927,10 +1138,11 @@ def _build_instance_get(context, session=None): session = get_session() partial = session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ - options(joinedload_all('fixed_ip.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @@ -946,9 +1158,10 @@ def _build_instance_get(context, session=None): def instance_get_all(context): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ @@ -956,12 +1169,31 @@ def instance_get_all(context): @require_admin_context -def instance_get_all_by_user(context, user_id): +def instance_get_active_by_window(context, begin, end=None): + """Return instances that were continuously active over the given window""" session = get_session() - return session.query(models.Instance).\ + query = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('instance_type')).\ + 
filter(models.Instance.launched_at < begin) + if end: + query = query.filter(or_(models.Instance.terminated_at == None, + models.Instance.terminated_at > end)) + else: + query = query.filter(models.Instance.terminated_at == None) + return query.all() + + +@require_admin_context +def instance_get_all_by_user(context, user_id): + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ @@ -973,9 +1205,10 @@ def instance_get_all_by_user(context, user_id): def instance_get_all_by_host(context, host): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(host=host).\ @@ -989,9 +1222,10 @@ def instance_get_all_by_project(context, project_id): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ @@ -1005,9 +1239,10 @@ def instance_get_all_by_reservation(context, reservation_id): if is_admin_context(context): return session.query(models.Instance).\ - 
options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(reservation_id=reservation_id).\ @@ -1015,9 +1250,10 @@ def instance_get_all_by_reservation(context, reservation_id): all() elif is_user_context(context): return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(project_id=context.project_id).\ @@ -1030,7 +1266,8 @@ def instance_get_all_by_reservation(context, reservation_id): def instance_get_project_vpn(context, project_id): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ options(joinedload('metadata')).\ @@ -1042,38 +1279,53 @@ def instance_get_project_vpn(context, project_id): @require_context -def instance_get_fixed_address(context, instance_id): +def instance_get_fixed_addresses(context, instance_id): session = get_session() with session.begin(): instance_ref = instance_get(context, instance_id, session=session) - if not instance_ref.fixed_ip: - return None - return instance_ref.fixed_ip['address'] + try: + fixed_ips = fixed_ip_get_by_instance(context, instance_id) + except exception.NotFound: + return [] + return [fixed_ip.address for 
fixed_ip in fixed_ips] @require_context -def instance_get_fixed_address_v6(context, instance_id): +def instance_get_fixed_addresses_v6(context, instance_id): session = get_session() with session.begin(): + # get instance instance_ref = instance_get(context, instance_id, session=session) - network_ref = network_get_by_instance(context, instance_id) - prefix = network_ref.cidr_v6 - mac = instance_ref.mac_address + # assume instance has 1 mac for each network associated with it + # get networks associated with instance + network_refs = network_get_all_by_instance(context, instance_id) + # compile a list of cidr_v6 prefixes sorted by network id + prefixes = [ref.cidr_v6 for ref in + sorted(network_refs, key=lambda ref: ref.id)] + # get vifs associated with instance + vif_refs = virtual_interface_get_by_instance(context, instance_ref.id) + # compile list of the mac_addresses for vifs sorted by network id + macs = [vif_ref['address'] for vif_ref in + sorted(vif_refs, key=lambda vif_ref: vif_ref['network_id'])] + # get project id from instance project_id = instance_ref.project_id - return ipv6.to_global(prefix, mac, project_id) + # combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples + prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs]) + # return list containing ipv6 address for each tuple + return [ipv6.to_global_ipv6(*t) for t in prefix_mac_tuples] @require_context def instance_get_floating_address(context, instance_id): - session = get_session() - with session.begin(): - instance_ref = instance_get(context, instance_id, session=session) - if not instance_ref.fixed_ip: - return None - if not instance_ref.fixed_ip.floating_ips: - return None - # NOTE(vish): this just returns the first floating ip - return instance_ref.fixed_ip.floating_ips[0]['address'] + fixed_ip_refs = fixed_ip_get_by_instance(context, instance_id) + if not fixed_ip_refs: + return None + # NOTE(tr3buchet): this only gets the first fixed_ip + # won't find floating ips 
associated with other fixed_ips + if not fixed_ip_refs[0].floating_ips: + return None + # NOTE(vish): this just returns the first floating ip + return fixed_ip_refs[0].floating_ips[0]['address'] @require_admin_context @@ -1097,7 +1349,11 @@ def instance_update(context, instance_id, values): instance_metadata_update_or_create(context, instance_id, values.pop('metadata')) with session.begin(): - instance_ref = instance_get(context, instance_id, session=session) + if utils.is_uuid_like(instance_id): + instance_ref = instance_get_by_uuid(context, instance_id, + session=session) + else: + instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) instance_ref.save(session=session) return instance_ref @@ -1238,20 +1494,49 @@ def key_pair_get_all_by_user(context, user_id): @require_admin_context -def network_associate(context, project_id): +def network_associate(context, project_id, force=False): + """Associate a project with a network. + + called by project_get_networks under certain conditions + and network manager add_network_to_project() + + only associate if the project doesn't already have a network + or if force is True + + force solves race condition where a fresh project has multiple instance + builds simultaneosly picked up by multiple network hosts which attempt + to associate the project with multiple networks + force should only be used as a direct consequence of user request + all automated requests should not use force + """ session = get_session() with session.begin(): - network_ref = session.query(models.Network).\ - filter_by(deleted=False).\ - filter_by(project_id=None).\ - with_lockmode('update').\ - first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not network_ref: - raise db.NoMoreNetworks() - network_ref['project_id'] = project_id - session.add(network_ref) + + def network_query(project_filter): + return session.query(models.Network).\ + 
filter_by(deleted=False).\ + filter_by(project_id=project_filter).\ + with_lockmode('update').\ + first() + + if not force: + # find out if project has a network + network_ref = network_query(project_id) + + if force or not network_ref: + # in force mode or project doesn't have a network so assocaite + # with a new network + + # get new network + network_ref = network_query(None) + if not network_ref: + raise db.NoMoreNetworks() + + # associate with network + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + network_ref['project_id'] = project_id + session.add(network_ref) return network_ref @@ -1354,7 +1639,8 @@ def network_get(context, network_id, session=None): @require_admin_context def network_get_all(context): session = get_session() - result = session.query(models.Network) + result = session.query(models.Network).\ + filter_by(deleted=False).all() if not result: raise exception.NoNetworksFound() return result @@ -1372,6 +1658,7 @@ def network_get_associated_fixed_ips(context, network_id): options(joinedload_all('instance')).\ filter_by(network_id=network_id).\ filter(models.FixedIp.instance_id != None).\ + filter(models.FixedIp.virtual_interface_id != None).\ filter_by(deleted=False).\ all() @@ -1402,6 +1689,8 @@ def network_get_by_cidr(context, cidr): @require_admin_context def network_get_by_instance(_context, instance_id): + # note this uses fixed IP to get to instance + # only works for networks the instance has an IP from session = get_session() rv = session.query(models.Network).\ filter_by(deleted=False).\ @@ -1421,13 +1710,31 @@ def network_get_all_by_instance(_context, instance_id): filter_by(deleted=False).\ join(models.Network.fixed_ips).\ filter_by(instance_id=instance_id).\ - filter_by(deleted=False) + filter_by(deleted=False).\ + all() if not rv: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return rv @require_admin_context +def network_get_all_by_host(context, host): + 
session = get_session() + with session.begin(): + # NOTE(vish): return networks that have host set + # or that have a fixed ip with host set + host_filter = or_(models.Network.host == host, + models.FixedIp.host == host) + + return session.query(models.Network).\ + filter_by(deleted=False).\ + join(models.Network.fixed_ips).\ + filter(host_filter).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context def network_set_host(context, network_id, host_id): session = get_session() with session.begin(): @@ -1455,37 +1762,7 @@ def network_update(context, network_id, values): network_ref = network_get(context, network_id, session=session) network_ref.update(values) network_ref.save(session=session) - - -################### - - -@require_context -def project_get_network(context, project_id, associate=True): - session = get_session() - result = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - if not result: - if not associate: - return None - try: - return network_associate(context, project_id) - except IntegrityError: - # NOTE(vish): We hit this if there is a race and two - # processes are attempting to allocate the - # network at the same time - result = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - return result - - -@require_context -def project_get_network_v6(context, project_id): - return project_get_network(context, project_id) + return network_ref ################### @@ -1972,6 +2249,23 @@ def block_device_mapping_update(context, bdm_id, values): @require_context +def block_device_mapping_update_or_create(context, values): + session = get_session() + with session.begin(): + result = session.query(models.BlockDeviceMapping).\ + filter_by(instance_id=values['instance_id']).\ + filter_by(device_name=values['device_name']).\ + filter_by(deleted=False).\ + first() + if not result: + bdm_ref = models.BlockDeviceMapping() + 
bdm_ref.update(values) + bdm_ref.save(session=session) + else: + result.update(values) + + +@require_context def block_device_mapping_get_all_by_instance(context, instance_id): session = get_session() result = session.query(models.BlockDeviceMapping).\ @@ -2323,6 +2617,73 @@ def user_get_all(context): all() +def user_get_roles(context, user_id): + session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + return [role.role for role in user_ref['roles']] + + +def user_get_roles_for_project(context, user_id, project_id): + session = get_session() + with session.begin(): + res = session.query(models.UserProjectRoleAssociation).\ + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + all() + return [association.role for association in res] + + +def user_remove_project_role(context, user_id, project_id, role): + session = get_session() + with session.begin(): + session.query(models.UserProjectRoleAssociation).\ + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + filter_by(role=role).\ + delete() + + +def user_remove_role(context, user_id, role): + session = get_session() + with session.begin(): + res = session.query(models.UserRoleAssociation).\ + filter_by(user_id=user_id).\ + filter_by(role=role).\ + all() + for role in res: + session.delete(role) + + +def user_add_role(context, user_id, role): + session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + models.UserRoleAssociation(user=user_ref, role=role).\ + save(session=session) + + +def user_add_project_role(context, user_id, project_id, role): + session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + project_ref = project_get(context, project_id, session=session) + models.UserProjectRoleAssociation(user_id=user_ref['id'], + project_id=project_ref['id'], + role=role).save(session=session) + + +def user_update(context, user_id, values): + 
session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + user_ref.update(values) + user_ref.save(session=session) + + +################### + + def project_create(_context, values): project_ref = models.Project() project_ref.update(values) @@ -2386,14 +2747,6 @@ def project_remove_member(context, project_id, user_id): project.save(session=session) -def user_update(context, user_id, values): - session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - user_ref.update(values) - user_ref.save(session=session) - - def project_update(context, project_id, values): session = get_session() with session.begin(): @@ -2415,73 +2768,26 @@ def project_delete(context, id): session.delete(project_ref) -def user_get_roles(context, user_id): - session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - return [role.role for role in user_ref['roles']] - - -def user_get_roles_for_project(context, user_id, project_id): - session = get_session() - with session.begin(): - res = session.query(models.UserProjectRoleAssociation).\ - filter_by(user_id=user_id).\ - filter_by(project_id=project_id).\ - all() - return [association.role for association in res] - - -def user_remove_project_role(context, user_id, project_id, role): - session = get_session() - with session.begin(): - session.query(models.UserProjectRoleAssociation).\ - filter_by(user_id=user_id).\ - filter_by(project_id=project_id).\ - filter_by(role=role).\ - delete() - - -def user_remove_role(context, user_id, role): - session = get_session() - with session.begin(): - res = session.query(models.UserRoleAssociation).\ - filter_by(user_id=user_id).\ - filter_by(role=role).\ - all() - for role in res: - session.delete(role) - - -def user_add_role(context, user_id, role): - session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - 
models.UserRoleAssociation(user=user_ref, role=role).\ - save(session=session) - - -def user_add_project_role(context, user_id, project_id, role): +@require_context +def project_get_networks(context, project_id, associate=True): + # NOTE(tr3buchet): as before this function will associate + # a project with a network if it doesn't have one and + # associate is true session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - project_ref = project_get(context, project_id, session=session) - models.UserProjectRoleAssociation(user_id=user_ref['id'], - project_id=project_ref['id'], - role=role).save(session=session) - + result = session.query(models.Network).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).all() -################### + if not result: + if not associate: + return [] + return [network_associate(context, project_id)] + return result -@require_admin_context -def host_get_networks(context, host): - session = get_session() - with session.begin(): - return session.query(models.Network).\ - filter_by(deleted=False).\ - filter_by(host=host).\ - all() +@require_context +def project_get_networks_v6(context, project_id): + return project_get_networks(context, project_id) ################### @@ -2517,13 +2823,13 @@ def migration_get(context, id, session=None): @require_admin_context -def migration_get_by_instance_and_status(context, instance_id, status): +def migration_get_by_instance_and_status(context, instance_uuid, status): session = get_session() result = session.query(models.Migration).\ - filter_by(instance_id=instance_id).\ + filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).first() if not result: - raise exception.MigrationNotFoundByStatus(instance_id=instance_id, + raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result @@ -2696,17 +3002,15 @@ def instance_type_get_all(context, inactive=False): filter_by(deleted=False).\ 
order_by("name").\ all() + inst_dict = {} if inst_types: - inst_dict = {} for i in inst_types: inst_dict[i['name']] = _dict_with_extra_specs(i) - return inst_dict - else: - raise exception.NoInstanceTypesFound() + return inst_dict @require_context -def instance_type_get_by_id(context, id): +def instance_type_get(context, id): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ @@ -2794,7 +3098,7 @@ def zone_update(context, zone_id, values): if not zone: raise exception.ZoneNotFound(zone_id=zone_id) zone.update(values) - zone.save() + zone.save(session=session) return zone @@ -2825,14 +3129,6 @@ def zone_get_all(context): #################### -def require_instance_exists(func): - def new_func(context, instance_id, *args, **kwargs): - db.api.instance_get(context, instance_id) - return func(context, instance_id, *args, **kwargs) - new_func.__name__ = func.__name__ - return new_func - - @require_context @require_instance_exists def instance_metadata_get(context, instance_id): diff --git a/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py new file mode 100644 index 000000000..1b7871e5f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py @@ -0,0 +1,38 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, Float, Integer, MetaData, Table + +meta = MetaData() + +zones = Table('zones', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +weight_offset = Column('weight_offset', Float(), default=0.0) +weight_scale = Column('weight_scale', Float(), default=1.0) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + zones.create_column(weight_offset) + zones.create_column(weight_scale) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + zones.drop_column(weight_offset) + zones.drop_column(weight_scale) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py new file mode 100644 index 000000000..4a117bb11 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py @@ -0,0 +1,125 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime + +from sqlalchemy import * +from migrate import * + +from nova import log as logging +from nova import utils + +meta = MetaData() + +# virtual interface table to add to DB +virtual_interfaces = Table('virtual_interfaces', meta, + Column('created_at', DateTime(timezone=False), + default=utils.utcnow()), + Column('updated_at', DateTime(timezone=False), + onupdate=utils.utcnow()), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('network_id', + Integer(), + ForeignKey('networks.id')), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + mysql_engine='InnoDB') + + +# bridge_interface column to add to networks table +interface = Column('bridge_interface', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)) + + +# virtual interface id column to add to fixed_ips table +# foreignkey added in next migration +virtual_interface_id = Column('virtual_interface_id', + Integer()) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + c = instances.columns['mac_address'] + + # add interface column to networks table + # values will have to be set manually before running nova + try: + networks.create_column(interface) + except Exception: + logging.error(_("interface column not added to networks table")) + raise + + # create virtual_interfaces table + try: + virtual_interfaces.create() + except Exception: + logging.error(_("Table |%s| not created!"), 
repr(virtual_interfaces)) + raise + + # add virtual_interface_id column to fixed_ips table + try: + fixed_ips.create_column(virtual_interface_id) + except Exception: + logging.error(_("VIF column not added to fixed_ips table")) + raise + + # populate the virtual_interfaces table + # extract data from existing instance and fixed_ip tables + s = select([instances.c.id, instances.c.mac_address, + fixed_ips.c.network_id], + fixed_ips.c.instance_id == instances.c.id) + keys = ('instance_id', 'address', 'network_id') + join_list = [dict(zip(keys, row)) for row in s.execute()] + logging.debug(_("join list for moving mac_addresses |%s|"), join_list) + + # insert data into the table + if join_list: + i = virtual_interfaces.insert() + i.execute(join_list) + + # populate the fixed_ips virtual_interface_id column + s = select([fixed_ips.c.id, fixed_ips.c.instance_id], + fixed_ips.c.instance_id != None) + + for row in s.execute(): + m = select([virtual_interfaces.c.id]).\ + where(virtual_interfaces.c.instance_id == row['instance_id']).\ + as_scalar() + u = fixed_ips.update().values(virtual_interface_id=m).\ + where(fixed_ips.c.id == row['id']) + u.execute() + + # drop the mac_address column from instances + c.drop() + + +def downgrade(migrate_engine): + logging.error(_("Can't downgrade without losing data")) + raise Exception diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py new file mode 100644 index 000000000..56e927717 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py @@ -0,0 +1,56 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from sqlalchemy import * +from migrate import * + +from nova import log as logging +from nova import utils + +meta = MetaData() + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + # grab tables + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + # add foreignkey if not sqlite + try: + if not dialect.startswith('sqlite'): + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[virtual_interfaces.c.id]).create() + except Exception: + logging.error(_("foreign key constraint couldn't be added")) + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + # drop foreignkey if not sqlite + try: + if not dialect.startswith('sqlite'): + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[virtual_interfaces.c.id]).drop() + except Exception: + logging.error(_("foreign key constraint couldn't be dropped")) + raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql new file mode 100644 index 000000000..c1d26b180 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql @@ -0,0 +1,48 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id 
INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql new file mode 100644 index 000000000..2a9362545 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql @@ -0,0 +1,48 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, 
created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py b/nova/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py new file mode 100644 index 000000000..6b98b9890 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py @@ -0,0 +1,47 @@ +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# +root_device_name = Column( + 'root_device_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances.create_column(root_device_name) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta.bind = migrate_engine + instances.drop_column('root_device_name') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py b/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py new file mode 100644 index 000000000..3a5f7eba8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/033_ha_network.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, Table, MetaData, Boolean, String + +meta = MetaData() + +fixed_ips_host = Column('host', String(255)) + +networks_multi_host = Column('multi_host', Boolean, default=False) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.create_column(fixed_ips_host) + + networks = Table('networks', meta, autoload=True) + networks.create_column(networks_multi_host) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.drop_column(fixed_ips_host) + + networks = Table('networks', meta, autoload=True) + networks.drop_column(networks_multi_host) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py new file mode 100644 index 000000000..b002ba064 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from sqlalchemy import Column, Integer, String, MetaData, Table + +meta = MetaData() + + +# +# Tables to alter +# +# + +instance_id = Column('instance_id', Integer()) +instance_uuid = Column('instance_uuid', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + migrations = Table('migrations', meta, autoload=True) + migrations.create_column(instance_uuid) + migrations.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + migrations = Table('migrations', meta, autoload=True) + migrations.c.instance_uuid.drop() + migrations.create_column(instance_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py b/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py new file mode 100644 index 000000000..c938eb716 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, Table, MetaData, Boolean, String + +meta = MetaData() + +dns2 = Column('dns2', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + networks.c.dns.alter(Column('dns1', String(255))) + networks.create_column(dns2) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + networks.c.dns1.alter(Column('dns', String(255))) + networks.drop_column(dns2) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 2955b8780..7e35c2cba 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -21,7 +21,7 @@ SQLAlchemy models for nova data. from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema -from sqlalchemy import ForeignKey, DateTime, Boolean, Text +from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.schema import ForeignKeyConstraint @@ -31,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session from nova import auth from nova import exception from nova import flags +from nova import ipv6 from nova import utils @@ -209,12 +210,12 @@ class Instance(BASE, NovaBase): hostname = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) + # aka flavor_id instance_type_id = Column(Integer) user_data = Column(Text) reservation_id = Column(String(255)) - mac_address = Column(String(255)) scheduled_at = Column(DateTime) launched_at = Column(DateTime) @@ -236,6 +237,8 @@ class Instance(BASE, NovaBase): vm_mode = Column(String(255)) uuid = Column(String(36)) + root_device_name = Column(String(255)) + # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such # vmstate_state = running, halted, 
suspended, paused @@ -526,7 +529,8 @@ class Migration(BASE, NovaBase): dest_host = Column(String(255)) old_flavor_id = Column(Integer()) new_flavor_id = Column(Integer()) - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance_uuid = Column(String(255), ForeignKey('instances.uuid'), + nullable=True) #TODO(_cerberus_): enum status = Column(String(255)) @@ -543,14 +547,17 @@ class Network(BASE, NovaBase): injected = Column(Boolean, default=False) cidr = Column(String(255), unique=True) cidr_v6 = Column(String(255), unique=True) + multi_host = Column(Boolean, default=False) gateway_v6 = Column(String(255)) netmask_v6 = Column(String(255)) netmask = Column(String(255)) bridge = Column(String(255)) + bridge_interface = Column(String(255)) gateway = Column(String(255)) broadcast = Column(String(255)) - dns = Column(String(255)) + dns1 = Column(String(255)) + dns2 = Column(String(255)) vlan = Column(Integer) vpn_public_address = Column(String(255)) @@ -558,26 +565,33 @@ class Network(BASE, NovaBase): vpn_private_address = Column(String(255)) dhcp_start = Column(String(255)) - # NOTE(vish): The unique constraint below helps avoid a race condition - # when associating a network, but it also means that we - # can't associate two networks with one project. - project_id = Column(String(255), unique=True) + project_id = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) -class AuthToken(BASE, NovaBase): - """Represents an authorization token for all API transactions. 
+class VirtualInterface(BASE, NovaBase): + """Represents a virtual interface on an instance.""" + __tablename__ = 'virtual_interfaces' + id = Column(Integer, primary_key=True) + address = Column(String(255), unique=True) + network_id = Column(Integer, ForeignKey('networks.id')) + network = relationship(Network, backref=backref('virtual_interfaces')) - Fields are a string representing the actual token and a user id for - mapping to the actual user + # TODO(tr3buchet): cut the cord, removed foreign key and backrefs + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, backref=backref('virtual_interfaces')) - """ - __tablename__ = 'auth_tokens' - token_hash = Column(String(255), primary_key=True) - user_id = Column(String(255)) - server_management_url = Column(String(255)) - storage_url = Column(String(255)) - cdn_management_url = Column(String(255)) + @property + def fixed_ipv6(self): + cidr_v6 = self.network.cidr_v6 + if cidr_v6 is None: + ipv6_address = None + else: + project_id = self.instance.project_id + mac = self.address + ipv6_address = ipv6.to_global(cidr_v6, mac, project_id) + + return ipv6_address # TODO(vish): can these both come from the same baseclass? 
@@ -588,16 +602,56 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) + virtual_interface_id = Column(Integer, ForeignKey('virtual_interfaces.id'), + nullable=True) + virtual_interface = relationship(VirtualInterface, + backref=backref('fixed_ips')) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, - backref=backref('fixed_ip', uselist=False), + backref=backref('fixed_ips'), foreign_keys=instance_id, primaryjoin='and_(' 'FixedIp.instance_id == Instance.id,' 'FixedIp.deleted == False)') + # associated means that a fixed_ip has its instance_id column set + # allocated means that a fixed_ip has a its virtual_interface_id column set allocated = Column(Boolean, default=False) + # leased means dhcp bridge has leased the ip leased = Column(Boolean, default=False) reserved = Column(Boolean, default=False) + host = Column(String(255)) + + +class FloatingIp(BASE, NovaBase): + """Represents a floating ip that dynamically forwards to a fixed ip.""" + __tablename__ = 'floating_ips' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) + fixed_ip = relationship(FixedIp, + backref=backref('floating_ips'), + foreign_keys=fixed_ip_id, + primaryjoin='and_(' + 'FloatingIp.fixed_ip_id == FixedIp.id,' + 'FloatingIp.deleted == False)') + project_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) + auto_assigned = Column(Boolean, default=False, nullable=False) + + +class AuthToken(BASE, NovaBase): + """Represents an authorization token for all API transactions. 
+ + Fields are a string representing the actual token and a user id for + mapping to the actual user + + """ + __tablename__ = 'auth_tokens' + token_hash = Column(String(255), primary_key=True) + user_id = Column(String(255)) + server_management_url = Column(String(255)) + storage_url = Column(String(255)) + cdn_management_url = Column(String(255)) class User(BASE, NovaBase): @@ -660,23 +714,6 @@ class UserProjectAssociation(BASE, NovaBase): project_id = Column(String(255), ForeignKey(Project.id), primary_key=True) -class FloatingIp(BASE, NovaBase): - """Represents a floating ip that dynamically forwards to a fixed ip.""" - __tablename__ = 'floating_ips' - id = Column(Integer, primary_key=True) - address = Column(String(255)) - fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) - fixed_ip = relationship(FixedIp, - backref=backref('floating_ips'), - foreign_keys=fixed_ip_id, - primaryjoin='and_(' - 'FloatingIp.fixed_ip_id == FixedIp.id,' - 'FloatingIp.deleted == False)') - project_id = Column(String(255)) - host = Column(String(255)) # , ForeignKey('hosts.id')) - auto_assigned = Column(Boolean, default=False, nullable=False) - - class ConsolePool(BASE, NovaBase): """Represents pool of consoles on the same physical node.""" __tablename__ = 'console_pools' @@ -738,6 +775,8 @@ class Zone(BASE, NovaBase): api_url = Column(String(255)) username = Column(String(255)) password = Column(String(255)) + weight_offset = Column(Float(), default=0.0) + weight_scale = Column(Float(), default=1.0) class AgentBuild(BASE, NovaBase): diff --git a/nova/exception.py b/nova/exception.py index ef50b144f..38e705417 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -24,8 +24,9 @@ SHOULD include dedicated exception logging. 
""" -from nova import log as logging +from functools import wraps +from nova import log as logging LOG = logging.getLogger('nova.exception') @@ -77,23 +78,53 @@ def wrap_db_error(f): except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) - return _wrap _wrap.func_name = f.func_name + return _wrap -def wrap_exception(f): - def _wrap(*args, **kw): - try: - return f(*args, **kw) - except Exception, e: - if not isinstance(e, Error): - #exc_type, exc_value, exc_traceback = sys.exc_info() - LOG.exception(_('Uncaught exception')) - #logging.error(traceback.extract_stack(exc_traceback)) - raise Error(str(e)) - raise - _wrap.func_name = f.func_name - return _wrap +def wrap_exception(notifier=None, publisher_id=None, event_type=None, + level=None): + """This decorator wraps a method to catch any exceptions that may + get thrown. It logs the exception as well as optionally sending + it to the notification system. + """ + # TODO(sandy): Find a way to import nova.notifier.api so we don't have + # to pass it in as a parameter. Otherwise we get a cyclic import of + # nova.notifier.api -> nova.utils -> nova.exception :( + def inner(f): + def wrapped(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + if notifier: + payload = dict(args=args, exception=e) + payload.update(kw) + + # Use a temp vars so we don't shadow + # our outer definitions. + temp_level = level + if not temp_level: + temp_level = notifier.ERROR + + temp_type = event_type + if not temp_type: + # If f has multiple decorators, they must use + # functools.wraps to ensure the name is + # propagated. 
+ temp_type = f.__name__ + + notifier.notify(publisher_id, temp_type, temp_level, + payload) + + if not isinstance(e, Error): + #exc_type, exc_value, exc_traceback = sys.exc_info() + LOG.exception(_('Uncaught exception')) + #logging.error(traceback.extract_stack(exc_traceback)) + raise Error(str(e)) + raise + + return wraps(f)(wrapped) + return inner class NovaException(Exception): @@ -118,6 +149,15 @@ class NovaException(Exception): return self._error_string +class VirtualInterfaceCreateException(NovaException): + message = _("Virtual Interface creation failed") + + +class VirtualInterfaceMacAddressException(NovaException): + message = _("5 attempts to create virtual interface" + "with unique mac address failed") + + class NotAuthorized(NovaException): message = _("Not authorized.") @@ -356,34 +396,67 @@ class DatastoreNotFound(NotFound): message = _("Could not find the datastore reference(s) which the VM uses.") -class NoFixedIpsFoundForInstance(NotFound): +class FixedIpNotFound(NotFound): + message = _("No fixed IP associated with id %(id)s.") + + +class FixedIpNotFoundForAddress(FixedIpNotFound): + message = _("Fixed ip not found for address %(address)s.") + + +class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_id)s has zero fixed ips.") -class FloatingIpNotFound(NotFound): - message = _("Floating ip %(floating_ip)s not found") +class FixedIpNotFoundForNetworkHost(FixedIpNotFound): + message = _("Network host %(host)s has zero fixed ips " + "in network %(network_id)s.") -class FloatingIpNotFoundForFixedAddress(NotFound): - message = _("Floating ip not found for fixed address %(fixed_ip)s.") +class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): + message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.") -class NoFloatingIpsDefined(NotFound): - message = _("Zero floating ips could be found.") +class FixedIpNotFoundForVirtualInterface(FixedIpNotFound): + message = _("Virtual interface %(vif_id)s has zero 
associated fixed ips.") -class NoFloatingIpsDefinedForHost(NoFloatingIpsDefined): - message = _("Zero floating ips defined for host %(host)s.") +class FixedIpNotFoundForHost(FixedIpNotFound): + message = _("Host %(host)s has zero fixed ips.") -class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined): - message = _("Zero floating ips defined for instance %(instance_id)s.") +class NoMoreFixedIps(Error): + message = _("Zero fixed ips available.") -class NoMoreFloatingIps(NotFound): +class NoFixedIpsDefined(NotFound): + message = _("Zero fixed ips could be found.") + + +class FloatingIpNotFound(NotFound): + message = _("Floating ip not found for id %(id)s.") + + +class FloatingIpNotFoundForAddress(FloatingIpNotFound): + message = _("Floating ip not found for address %(address)s.") + + +class FloatingIpNotFoundForProject(FloatingIpNotFound): + message = _("Floating ip not found for project %(project_id)s.") + + +class FloatingIpNotFoundForHost(FloatingIpNotFound): + message = _("Floating ip not found for host %(host)s.") + + +class NoMoreFloatingIps(FloatingIpNotFound): message = _("Zero floating ips available.") +class NoFloatingIpsDefined(NotFound): + message = _("Zero floating ips exist.") + + class KeypairNotFound(NotFound): message = _("Keypair %(keypair_name)s not found for user %(user_id)s") @@ -558,6 +631,14 @@ class GlobalRoleNotAllowed(NotAllowed): message = _("Unable to use global role %(role_id)s") +class ImageRotationNotAllowed(NovaException): + message = _("Rotation is not allowed for snapshots") + + +class RotationRequiredForBackup(NovaException): + message = _("Rotation param is required for backup image_type") + + #TODO(bcwaldon): EOL this exception! 
class Duplicate(NovaException): pass diff --git a/nova/flags.py b/nova/flags.py index 57a4ecf2f..49355b436 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -305,6 +305,8 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') +DEFINE_list('enabled_apis', ['ec2', 'osapi'], + 'list of APIs to enable by default') DEFINE_string('ec2_host', '$my_ip', 'ip of api server') DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server') DEFINE_integer('ec2_port', 8773, 'cloud controller port') diff --git a/nova/image/fake.py b/nova/image/fake.py index c4b3d5fd6..28e912534 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -137,7 +137,11 @@ class _FakeImageService(service.BaseImageService): try: image_id = metadata['id'] except KeyError: - image_id = random.randint(0, 2 ** 31 - 1) + while True: + image_id = random.randint(0, 2 ** 31 - 1) + if not self.images.get(str(image_id)): + break + image_id = str(image_id) if self.images.get(image_id): @@ -176,3 +180,8 @@ _fakeImageService = _FakeImageService() def FakeImageService(): return _fakeImageService + + +def FakeImageService_reset(): + global _fakeImageService + _fakeImageService = _FakeImageService() diff --git a/nova/image/glance.py b/nova/image/glance.py index 55d948a32..5c2dc957b 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -89,6 +89,10 @@ class GlanceImageService(service.BaseImageService): # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] + filters = filters or {} + if 'is_public' not in filters: + # NOTE(vish): don't filter out private images + filters['is_public'] = 'none' image_metas = self.client.get_images_detailed(filters=filters, marker=marker, limit=limit) @@ -101,6 +105,10 @@ 
class GlanceImageService(service.BaseImageService): def detail(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] + filters = filters or {} + if 'is_public' not in filters: + # NOTE(vish): don't filter out private images + filters['is_public'] = 'none' image_metas = self.client.get_images_detailed(filters=filters, marker=marker, limit=limit) diff --git a/nova/image/s3.py b/nova/image/s3.py index 9e95bd698..c313c7a13 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -102,18 +102,7 @@ class S3ImageService(service.BaseImageService): key.get_contents_to_filename(local_filename) return local_filename - def _s3_create(self, context, metadata): - """Gets a manifext from s3 and makes an image.""" - - image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir) - - image_location = metadata['properties']['image_location'] - bucket_name = image_location.split('/')[0] - manifest_path = image_location[len(bucket_name) + 1:] - bucket = self._conn(context).get_bucket(bucket_name) - key = bucket.get_key(manifest_path) - manifest = key.get_contents_as_string() - + def _s3_parse_manifest(self, context, metadata, manifest): manifest = ElementTree.fromstring(manifest) image_format = 'ami' image_type = 'machine' @@ -141,6 +130,28 @@ class S3ImageService(service.BaseImageService): except Exception: arch = 'x86_64' + # NOTE(yamahata): + # EC2 ec2-budlne-image --block-device-mapping accepts + # <virtual name>=<device name> where + # virtual name = {ami, root, swap, ephemeral<N>} + # where N is no negative integer + # device name = the device name seen by guest kernel. + # They are converted into + # block_device_mapping/mapping/{virtual, device} + # + # Do NOT confuse this with ec2-register's block device mapping + # argument. 
+ mappings = [] + try: + block_device_mapping = manifest.findall('machine_configuration/' + 'block_device_mapping/' + 'mapping') + for bdm in block_device_mapping: + mappings.append({'virtual': bdm.find('virtual').text, + 'device': bdm.find('device').text}) + except Exception: + mappings = [] + properties = metadata['properties'] properties['project_id'] = context.project_id properties['architecture'] = arch @@ -151,13 +162,31 @@ class S3ImageService(service.BaseImageService): if ramdisk_id: properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id) + if mappings: + properties['mappings'] = mappings + metadata.update({'disk_format': image_format, 'container_format': image_format, 'status': 'queued', - 'is_public': True, + 'is_public': False, 'properties': properties}) metadata['properties']['image_state'] = 'pending' image = self.service.create(context, metadata) + return manifest, image + + def _s3_create(self, context, metadata): + """Gets a manifext from s3 and makes an image.""" + + image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir) + + image_location = metadata['properties']['image_location'] + bucket_name = image_location.split('/')[0] + manifest_path = image_location[len(bucket_name) + 1:] + bucket = self._conn(context).get_bucket(bucket_name) + key = bucket.get_key(manifest_path) + manifest = key.get_contents_as_string() + + manifest, image = self._s3_parse_manifest(context, metadata, manifest) image_id = image['id'] def delayed_create(): diff --git a/nova/network/api.py b/nova/network/api.py index dd3ed1709..33a9fe239 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -18,11 +18,9 @@ """Handles all requests relating to instances (guest vms).""" -from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import quota from nova import rpc from nova.db import base @@ -39,7 +37,7 @@ class API(base.Base): return dict(rv.iteritems()) def get_floating_ip_by_ip(self, context, address): - 
res = self.db.floating_ip_get_by_ip(context, address) + res = self.db.floating_ip_get_by_address(context, address) return dict(res.iteritems()) def list_floating_ips(self, context): @@ -47,13 +45,12 @@ class API(base.Base): context.project_id) return ips + def get_vifs_by_instance(self, context, instance_id): + vifs = self.db.virtual_interface_get_by_instance(context, instance_id) + return vifs + def allocate_floating_ip(self, context): - if quota.allowed_floating_ips(context, 1) < 1: - LOG.warn(_('Quota exceeeded for %s, tried to allocate ' - 'address'), - context.project_id) - raise quota.QuotaError(_('Address quota exceeded. You cannot ' - 'allocate any more addresses')) + """Adds a floating ip to a project.""" # NOTE(vish): We don't know which network host should get the ip # when we allocate, so just send it to any one. This # will probably need to move into a network supervisor @@ -65,7 +62,11 @@ class API(base.Base): def release_floating_ip(self, context, address, affect_auto_assigned=False): + """Removes floating ip with address from a project.""" floating_ip = self.db.floating_ip_get_by_address(context, address) + if floating_ip['fixed_ip']: + raise exception.ApiError(_('Floating ip is in use. ' + 'Disassociate it before releasing.')) if not affect_auto_assigned and floating_ip.get('auto_assigned'): return # NOTE(vish): We don't know which network host should get the ip @@ -78,8 +79,19 @@ class API(base.Base): 'args': {'floating_address': floating_ip['address']}}) def associate_floating_ip(self, context, floating_ip, fixed_ip, - affect_auto_assigned=False): - if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): + affect_auto_assigned=False): + """Associates a floating ip with a fixed ip. 
+ + ensures floating ip is allocated to the project in context + + :param fixed_ip: is either fixed_ip object or a string fixed ip address + :param floating_ip: is a string floating ip address + """ + # NOTE(tr3buchet): i don't like the "either or" argument type + # funcationility but i've left it alone for now + # TODO(tr3buchet): this function needs to be rewritten to move + # the network related db lookups into the network host code + if isinstance(fixed_ip, basestring): fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) if not affect_auto_assigned and floating_ip.get('auto_assigned'): @@ -99,9 +111,11 @@ class API(base.Base): '(%(project)s)') % {'address': floating_ip['address'], 'project': context.project_id}) - # NOTE(vish): Perhaps we should just pass this on to compute and - # let compute communicate with network. - host = fixed_ip['network']['host'] + # NOTE(vish): if we are multi_host, send to the instances host + if fixed_ip['network']['multi_host']: + host = fixed_ip['instance']['host'] + else: + host = fixed_ip['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), {'method': 'associate_floating_ip', @@ -110,15 +124,73 @@ class API(base.Base): def disassociate_floating_ip(self, context, address, affect_auto_assigned=False): + """Disassociates a floating ip from fixed ip it is associated with.""" floating_ip = self.db.floating_ip_get_by_address(context, address) if not affect_auto_assigned and floating_ip.get('auto_assigned'): return if not floating_ip.get('fixed_ip'): raise exception.ApiError('Address is not associated.') - # NOTE(vish): Get the topic from the host name of the network of - # the associated fixed ip. 
- host = floating_ip['fixed_ip']['network']['host'] - rpc.cast(context, + # NOTE(vish): if we are multi_host, send to the instances host + if floating_ip['fixed_ip']['network']['multi_host']: + host = floating_ip['fixed_ip']['instance']['host'] + else: + host = floating_ip['fixed_ip']['network']['host'] + rpc.call(context, self.db.queue_get_for(context, FLAGS.network_topic, host), {'method': 'disassociate_floating_ip', 'args': {'floating_address': floating_ip['address']}}) + + def allocate_for_instance(self, context, instance, **kwargs): + """Allocates all network structures for an instance. + + :returns: network info as from get_instance_nw_info() below + """ + args = kwargs + args['instance_id'] = instance['id'] + args['project_id'] = instance['project_id'] + args['host'] = instance['host'] + args['instance_type_id'] = instance['instance_type_id'] + + return rpc.call(context, FLAGS.network_topic, + {'method': 'allocate_for_instance', + 'args': args}) + + def deallocate_for_instance(self, context, instance, **kwargs): + """Deallocates all network structures related to instance.""" + args = kwargs + args['instance_id'] = instance['id'] + args['project_id'] = instance['project_id'] + rpc.cast(context, FLAGS.network_topic, + {'method': 'deallocate_for_instance', + 'args': args}) + + def add_fixed_ip_to_instance(self, context, instance_id, network_id): + """Adds a fixed ip to instance from specified network.""" + args = {'instance_id': instance_id, + 'network_id': network_id} + rpc.cast(context, FLAGS.network_topic, + {'method': 'add_fixed_ip_to_instance', + 'args': args}) + + def remove_fixed_ip_from_instance(self, context, instance_id, address): + """Removes a fixed ip from instance from specified network.""" + args = {'instance_id': instance_id, + 'address': address} + rpc.cast(context, FLAGS.network_topic, + {'method': 'remove_fixed_ip_from_instance', + 'args': args}) + + def add_network_to_project(self, context, project_id): + """Force adds another network to a 
project.""" + rpc.cast(context, FLAGS.network_topic, + {'method': 'add_network_to_project', + 'args': {'project_id': project_id}}) + + def get_instance_nw_info(self, context, instance): + """Returns all network info related to an instance.""" + args = {'instance_id': instance['id'], + 'instance_type_id': instance['instance_type_id'], + 'host': instance['host']} + return rpc.call(context, FLAGS.network_topic, + {'method': 'get_instance_nw_info', + 'args': args}) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 6c5a6f1ce..8ace07884 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -451,20 +451,21 @@ def floating_forward_rules(floating_ip, fixed_ip): '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] -def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): +def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): """Create a vlan and bridge unless they already exist.""" - interface = ensure_vlan(vlan_num) + interface = ensure_vlan(vlan_num, bridge_interface) ensure_bridge(bridge, interface, net_attrs) + return interface @utils.synchronized('ensure_vlan', external=True) -def ensure_vlan(vlan_num): +def ensure_vlan(vlan_num, bridge_interface): """Create a vlan unless it already exists.""" interface = 'vlan%s' % vlan_num if not _device_exists(interface): LOG.debug(_('Starting VLAN inteface %s'), interface) _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') - _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num) + _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num) _execute('sudo', 'ip', 'link', 'set', interface, 'up') return interface @@ -497,7 +498,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): suffix = net_attrs['cidr'].rpartition('/')[2] out, err = _execute('sudo', 'ip', 'addr', 'add', '%s/%s' % - (net_attrs['gateway'], suffix), + (net_attrs['dhcp_server'], suffix), 'brd', net_attrs['broadcast'], 'dev', @@ -551,21 +552,27 @@ def 
ensure_bridge(bridge, interface, net_attrs=None): bridge) -def get_dhcp_leases(context, network_id): +def get_dhcp_leases(context, network_ref): """Return a network's hosts config in dnsmasq leasefile format.""" hosts = [] - for fixed_ip_ref in db.network_get_associated_fixed_ips(context, - network_id): - hosts.append(_host_lease(fixed_ip_ref)) + for fixed_ref in db.network_get_associated_fixed_ips(context, + network_ref['id']): + host = fixed_ref['instance']['host'] + if network_ref['multi_host'] and FLAGS.host != host: + continue + hosts.append(_host_lease(fixed_ref)) return '\n'.join(hosts) -def get_dhcp_hosts(context, network_id): +def get_dhcp_hosts(context, network_ref): """Get network's hosts config in dhcp-host format.""" hosts = [] - for fixed_ip_ref in db.network_get_associated_fixed_ips(context, - network_id): - hosts.append(_host_dhcp(fixed_ip_ref)) + for fixed_ref in db.network_get_associated_fixed_ips(context, + network_ref['id']): + host = fixed_ref['instance']['host'] + if network_ref['multi_host'] and FLAGS.host != host: + continue + hosts.append(_host_dhcp(fixed_ref)) return '\n'.join(hosts) @@ -573,18 +580,16 @@ def get_dhcp_hosts(context, network_id): # configuration options (like dchp-range, vlan, ...) # aren't reloaded. @utils.synchronized('dnsmasq_start') -def update_dhcp(context, network_id): +def update_dhcp(context, network_ref): """(Re)starts a dnsmasq server for a given network. If a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance. 
""" - network_ref = db.network_get(context, network_id) - conffile = _dhcp_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: - f.write(get_dhcp_hosts(context, network_id)) + f.write(get_dhcp_hosts(context, network_ref)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) @@ -612,9 +617,7 @@ def update_dhcp(context, network_id): @utils.synchronized('radvd_start') -def update_ra(context, network_id): - network_ref = db.network_get(context, network_id) - +def update_ra(context, network_ref): conffile = _ra_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: conf_str = """ @@ -650,9 +653,6 @@ interface %s LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) command = _ra_cmd(network_ref) _execute(*command) - db.network_update(context, network_id, - {'gateway_v6': - utils.get_my_linklocal(network_ref['bridge'])}) def _host_lease(fixed_ip_ref): @@ -666,7 +666,7 @@ def _host_lease(fixed_ip_ref): seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time, - instance_ref['mac_address'], + fixed_ip_ref['virtual_interface']['address'], fixed_ip_ref['address'], instance_ref['hostname'] or '*') @@ -674,7 +674,7 @@ def _host_lease(fixed_ip_ref): def _host_dhcp(fixed_ip_ref): """Return a host string for an address in dhcp-host format.""" instance_ref = fixed_ip_ref['instance'] - return '%s,%s.%s,%s' % (instance_ref['mac_address'], + return '%s,%s.%s,%s' % (fixed_ip_ref['virtual_interface']['address'], instance_ref['hostname'], FLAGS.dhcp_domain, fixed_ip_ref['address']) @@ -701,10 +701,11 @@ def _dnsmasq_cmd(net): cmd = ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces', + '--interface=%s' % net['bridge'], '--conf-file=%s' % FLAGS.dnsmasq_config_file, '--domain=%s' % FLAGS.dhcp_domain, '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), - '--listen-address=%s' % net['gateway'], + '--listen-address=%s' % 
net['dhcp_server'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])), diff --git a/nova/network/manager.py b/nova/network/manager.py index bf0456522..6f7573f66 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -28,7 +28,6 @@ topologies. All of the network commands are issued to a subclass of :flat_network_bridge: Bridge device for simple network instances :flat_interface: FlatDhcp will bridge into this interface if set :flat_network_dns: Dns for simple network -:flat_network_dhcp_start: Dhcp start for FlatDhcp :vlan_start: First VLAN for private networks :vpn_ip: Public IP for the cloudpipe VPN servers :vpn_start: First Vpn port for private networks @@ -40,6 +39,8 @@ topologies. All of the network commands are issued to a subclass of is disassociated :fixed_ip_disassociate_timeout: Seconds after which a deallocated ip is disassociated +:create_unique_mac_address_attempts: Number of times to attempt creating + a unique mac address """ @@ -47,15 +48,20 @@ import datetime import math import netaddr import socket +from eventlet import greenpool from nova import context from nova import db from nova import exception from nova import flags +from nova import ipv6 from nova import log as logging from nova import manager +from nova import quota from nova import utils from nova import rpc +from nova.network import api as network_api +import random LOG = logging.getLogger("nova.network.manager") @@ -70,15 +76,15 @@ flags.DEFINE_bool('flat_injected', True, 'Whether to attempt to inject network setup into guest') flags.DEFINE_string('flat_interface', None, 'FlatDhcp will bridge into this interface if set') -flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', - 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') -flags.DEFINE_string('vlan_interface', 'eth0', - 'network device for vlans') 
+flags.DEFINE_string('vlan_interface', None, + 'vlans will bridge into this interface if set') flags.DEFINE_integer('num_networks', 1, 'Number of networks to support') flags.DEFINE_string('vpn_ip', '$my_ip', 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') +flags.DEFINE_bool('multi_host', False, + 'Default value for multi_host in networks') flags.DEFINE_integer('network_size', 256, 'Number of addresses in each private subnet') flags.DEFINE_string('floating_range', '4.4.4.0/24', @@ -94,7 +100,10 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False, 'Whether to update dhcp when fixed_ip is disassociated') flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600, 'Seconds after which a deallocated ip is disassociated') - +flags.DEFINE_integer('create_unique_mac_address_attempts', 5, + 'Number of attempts to create unique mac address') +flags.DEFINE_bool('auto_assign_floating_ip', False, + 'Autoassigning floating ip to VM') flags.DEFINE_bool('use_ipv6', False, 'use the ipv6') flags.DEFINE_string('network_host', socket.gethostname(), @@ -108,41 +117,235 @@ class AddressAlreadyAllocated(exception.Error): pass +class RPCAllocateFixedIP(object): + """Mixin class originally for FlatDCHP and VLAN network managers. 
+ + used since they share code to RPC.call allocate_fixed_ip on the + correct network host to configure dnsmasq + """ + def _allocate_fixed_ips(self, context, instance_id, host, networks, + **kwargs): + """Calls allocate_fixed_ip once for each network.""" + green_pool = greenpool.GreenPool() + + vpn = kwargs.pop('vpn') + for network in networks: + # NOTE(vish): if we are not multi_host pass to the network host + if not network['multi_host']: + host = network['host'] + # NOTE(vish): if there is no network host, set one + if host == None: + host = rpc.call(context, FLAGS.network_topic, + {'method': 'set_network_host', + 'args': {'network_ref': network}}) + if host != self.host: + # need to call allocate_fixed_ip to correct network host + topic = self.db.queue_get_for(context, + FLAGS.network_topic, + host) + args = {} + args['instance_id'] = instance_id + args['network_id'] = network['id'] + args['vpn'] = vpn + + green_pool.spawn_n(rpc.call, context, topic, + {'method': '_rpc_allocate_fixed_ip', + 'args': args}) + else: + # i am the correct host, run here + self.allocate_fixed_ip(context, instance_id, network, vpn=vpn) + + # wait for all of the allocates (if any) to finish + green_pool.waitall() + + def _rpc_allocate_fixed_ip(self, context, instance_id, network_id, + **kwargs): + """Sits in between _allocate_fixed_ips and allocate_fixed_ip to + perform network lookup on the far side of rpc. 
+ """ + network = self.db.network_get(context, network_id) + self.allocate_fixed_ip(context, instance_id, network, **kwargs) + + +class FloatingIP(object): + """Mixin class for adding floating IP functionality to a manager.""" + def init_host_floating_ips(self): + """Configures floating ips owned by host.""" + + admin_context = context.get_admin_context() + try: + floating_ips = self.db.floating_ip_get_all_by_host(admin_context, + self.host) + except exception.NotFound: + return + + for floating_ip in floating_ips: + if floating_ip.get('fixed_ip', None): + fixed_address = floating_ip['fixed_ip']['address'] + # NOTE(vish): The False here is because we ignore the case + # that the ip is already bound. + self.driver.bind_floating_ip(floating_ip['address'], False) + self.driver.ensure_floating_forward(floating_ip['address'], + fixed_address) + + def allocate_for_instance(self, context, **kwargs): + """Handles allocating the floating IP resources for an instance. + + calls super class allocate_for_instance() as well + + rpc.called by network_api + """ + instance_id = kwargs.get('instance_id') + project_id = kwargs.get('project_id') + LOG.debug(_("floating IP allocation for instance |%s|"), instance_id, + context=context) + # call the next inherited class's allocate_for_instance() + # which is currently the NetworkManager version + # do this first so fixed ip is already allocated + ips = super(FloatingIP, self).allocate_for_instance(context, **kwargs) + if FLAGS.auto_assign_floating_ip: + # allocate a floating ip (public_ip is just the address string) + public_ip = self.allocate_floating_ip(context, project_id) + # set auto_assigned column to true for the floating ip + self.db.floating_ip_set_auto_assigned(context, public_ip) + # get the floating ip object from public_ip string + floating_ip = self.db.floating_ip_get_by_address(context, + public_ip) + + # get the first fixed_ip belonging to the instance + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) 
+ fixed_ip = fixed_ips[0] if fixed_ips else None + + # call to correct network host to associate the floating ip + self.network_api.associate_floating_ip(context, + floating_ip, + fixed_ip, + affect_auto_assigned=True) + return ips + + def deallocate_for_instance(self, context, **kwargs): + """Handles deallocating floating IP resources for an instance. + + calls super class deallocate_for_instance() as well. + + rpc.called by network_api + """ + instance_id = kwargs.get('instance_id') + LOG.debug(_("floating IP deallocation for instance |%s|"), instance_id, + context=context) + + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + # add to kwargs so we can pass to super to save a db lookup there + kwargs['fixed_ips'] = fixed_ips + for fixed_ip in fixed_ips: + # disassociate floating ips related to fixed_ip + for floating_ip in fixed_ip.floating_ips: + address = floating_ip['address'] + self.network_api.disassociate_floating_ip(context, address) + # deallocate if auto_assigned + if floating_ip['auto_assigned']: + self.network_api.release_floating_ip(context, + address, + True) + + # call the next inherited class's deallocate_for_instance() + # which is currently the NetworkManager version + # call this after so floating IPs are handled first + super(FloatingIP, self).deallocate_for_instance(context, **kwargs) + + def allocate_floating_ip(self, context, project_id): + """Gets an floating ip from the pool.""" + # NOTE(tr3buchet): all networks hosts in zone now use the same pool + LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1)) + if quota.allowed_floating_ips(context, 1) < 1: + LOG.warn(_('Quota exceeded for %s, tried to allocate ' + 'address'), + context.project_id) + raise quota.QuotaError(_('Address quota exceeded. 
You cannot ' + 'allocate any more addresses')) + # TODO(vish): add floating ips through manage command + return self.db.floating_ip_allocate_address(context, + project_id) + + def associate_floating_ip(self, context, floating_address, fixed_address): + """Associates an floating ip to a fixed ip.""" + self.db.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address) + self.driver.bind_floating_ip(floating_address) + self.driver.ensure_floating_forward(floating_address, fixed_address) + + def disassociate_floating_ip(self, context, floating_address): + """Disassociates a floating ip.""" + fixed_address = self.db.floating_ip_disassociate(context, + floating_address) + self.driver.unbind_floating_ip(floating_address) + self.driver.remove_floating_forward(floating_address, fixed_address) + + def deallocate_floating_ip(self, context, floating_address): + """Returns an floating ip to the pool.""" + self.db.floating_ip_deallocate(context, floating_address) + + class NetworkManager(manager.SchedulerDependentManager): """Implements common network manager functionality. This class must be subclassed to support specific topologies. + host management: + hosts configure themselves for networks they are assigned to in the + table upon startup. If there are networks in the table which do not + have hosts, those will be filled in and have hosts configured + as the hosts pick them up one at time during their periodic task. + The one at a time part is to flatten the layout to help scale """ + # If True, this manager requires VIF to create a bridge. + SHOULD_CREATE_BRIDGE = False + + # If True, this manager requires VIF to create VLAN tag. 
+ SHOULD_CREATE_VLAN = False + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): if not network_driver: network_driver = FLAGS.network_driver self.driver = utils.import_object(network_driver) + self.network_api = network_api.API() super(NetworkManager, self).__init__(service_name='network', *args, **kwargs) + @utils.synchronized('get_dhcp') + def _get_dhcp_ip(self, context, network_ref, host=None): + """Get the proper dhcp address to listen on.""" + # NOTE(vish): this is for compatibility + if not network_ref['multi_host']: + return network_ref['gateway'] + + if not host: + host = self.host + network_id = network_ref['id'] + try: + fip = self.db.fixed_ip_get_by_network_host(context, + network_id, + host) + return fip['address'] + except exception.FixedIpNotFoundForNetworkHost: + elevated = context.elevated() + return self.db.fixed_ip_associate_pool(elevated, + network_id, + host=host) + def init_host(self): - """Do any initialization for a standalone service.""" - self.driver.init_host() - self.driver.ensure_metadata_ip() - # Set up networking for the projects for which we're already - # the designated network host. + """Do any initialization that needs to be run if this is a + standalone service. + """ + # NOTE(vish): Set up networks for which this host already has + # an ip address. ctxt = context.get_admin_context() - for network in self.db.host_get_networks(ctxt, self.host): - self._on_set_network_host(ctxt, network['id']) - floating_ips = self.db.floating_ip_get_all_by_host(ctxt, - self.host) - for floating_ip in floating_ips: - if floating_ip.get('fixed_ip', None): - fixed_address = floating_ip['fixed_ip']['address'] - # NOTE(vish): The False here is because we ignore the case - # that the ip is already bound. 
- self.driver.bind_floating_ip(floating_ip['address'], False) - self.driver.ensure_floating_forward(floating_ip['address'], - fixed_address) + for network in self.db.network_get_all_by_host(ctxt, self.host): + self._setup_network(ctxt, network) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" @@ -157,148 +360,263 @@ class NetworkManager(manager.SchedulerDependentManager): if num: LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num) - def set_network_host(self, context, network_id): + def set_network_host(self, context, network_ref): """Safely sets the host of the network.""" LOG.debug(_('setting network host'), context=context) host = self.db.network_set_host(context, - network_id, + network_ref['id'], self.host) - self._on_set_network_host(context, network_id) return host - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): + def _get_networks_for_instance(self, context, instance_id, project_id): + """Determine & return which networks an instance should connect to.""" + # TODO(tr3buchet) maybe this needs to be updated in the future if + # there is a better way to determine which networks + # a non-vlan instance should connect to + try: + networks = self.db.network_get_all(context) + except exception.NoNetworksFound: + return [] + + # return only networks which are not vlan networks + return [network for network in networks if + not network['vlan']] + + def allocate_for_instance(self, context, **kwargs): + """Handles allocating the various network resources for an instance. 
+ + rpc.called by network_api + """ + instance_id = kwargs.pop('instance_id') + host = kwargs.pop('host') + project_id = kwargs.pop('project_id') + type_id = kwargs.pop('instance_type_id') + vpn = kwargs.pop('vpn') + admin_context = context.elevated() + LOG.debug(_("network allocations for instance %s"), instance_id, + context=context) + networks = self._get_networks_for_instance(admin_context, instance_id, + project_id) + LOG.warn(networks) + self._allocate_mac_addresses(context, instance_id, networks) + self._allocate_fixed_ips(admin_context, instance_id, host, networks, + vpn=vpn) + return self.get_instance_nw_info(context, instance_id, type_id, host) + + def deallocate_for_instance(self, context, **kwargs): + """Handles deallocating various network resources for an instance. + + rpc.called by network_api + kwargs can contain fixed_ips to circumvent another db lookup + """ + instance_id = kwargs.pop('instance_id') + fixed_ips = kwargs.get('fixed_ips') or \ + self.db.fixed_ip_get_by_instance(context, instance_id) + LOG.debug(_("network deallocation for instance |%s|"), instance_id, + context=context) + # deallocate fixed ips + for fixed_ip in fixed_ips: + self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs) + + # deallocate vifs (mac addresses) + self.db.virtual_interface_delete_by_instance(context, instance_id) + + def get_instance_nw_info(self, context, instance_id, + instance_type_id, host): + """Creates network info list for instance. + + called by allocate_for_instance and netowrk_api + context needs to be elevated + :returns: network info list [(network,info),(network,info)...] + where network = dict containing pertinent data from a network db object + and info = dict containing pertinent networking data + """ + # TODO(tr3buchet) should handle floating IPs as well? 
+ try: + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + except exception.FixedIpNotFoundForInstance: + LOG.warn(_('No fixed IPs for instance %s'), instance_id) + fixed_ips = [] + + vifs = self.db.virtual_interface_get_by_instance(context, instance_id) + flavor = self.db.instance_type_get(context, instance_type_id) + network_info = [] + # a vif has an address, instance_id, and network_id + # it is also joined to the instance and network given by those IDs + for vif in vifs: + network = vif['network'] + + # determine which of the instance's IPs belong to this network + network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if + fixed_ip['network_id'] == network['id']] + + # TODO(tr3buchet) eventually "enabled" should be determined + def ip_dict(ip): + return { + "ip": ip, + "netmask": network["netmask"], + "enabled": "1"} + + def ip6_dict(): + return { + "ip": ipv6.to_global(network['cidr_v6'], + vif['address'], + network['project_id']), + "netmask": network['netmask_v6'], + "enabled": "1"} + network_dict = { + 'bridge': network['bridge'], + 'id': network['id'], + 'cidr': network['cidr'], + 'cidr_v6': network['cidr_v6'], + 'injected': network['injected'], + 'vlan': network['vlan'], + 'bridge_interface': network['bridge_interface'], + 'multi_host': network['multi_host']} + if network['multi_host']: + dhcp_server = self._get_dhcp_ip(context, network, host) + else: + dhcp_server = self._get_dhcp_ip(context, + network, + network['host']) + info = { + 'label': network['label'], + 'gateway': network['gateway'], + 'dhcp_server': dhcp_server, + 'broadcast': network['broadcast'], + 'mac': vif['address'], + 'rxtx_cap': flavor['rxtx_cap'], + 'dns': [], + 'ips': [ip_dict(ip) for ip in network_IPs], + 'should_create_bridge': self.SHOULD_CREATE_BRIDGE, + 'should_create_vlan': self.SHOULD_CREATE_VLAN} + + if network['cidr_v6']: + info['ip6s'] = [ip6_dict()] + # TODO(tr3buchet): handle ip6 routes here as well + if network['gateway_v6']: + info['gateway6'] 
= network['gateway_v6'] + if network['dns1']: + info['dns'].append(network['dns1']) + if network['dns2']: + info['dns'].append(network['dns2']) + + network_info.append((network_dict, info)) + return network_info + + def _allocate_mac_addresses(self, context, instance_id, networks): + """Generates mac addresses and creates vif rows in db for them.""" + for network in networks: + vif = {'address': self.generate_mac_address(), + 'instance_id': instance_id, + 'network_id': network['id']} + # try FLAG times to create a vif record with a unique mac_address + for i in range(FLAGS.create_unique_mac_address_attempts): + try: + self.db.virtual_interface_create(context, vif) + break + except exception.VirtualInterfaceCreateException: + vif['address'] = self.generate_mac_address() + else: + self.db.virtual_interface_delete_by_instance(context, + instance_id) + raise exception.VirtualInterfaceMacAddressException() + + def generate_mac_address(self): + """Generate a mac address for a vif on an instance.""" + mac = [0x02, 0x16, 0x3e, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + + def add_fixed_ip_to_instance(self, context, instance_id, host, network_id): + """Adds a fixed ip to an instance from specified network.""" + networks = [self.db.network_get(context, network_id)] + self._allocate_fixed_ips(context, instance_id, host, networks) + + def remove_fixed_ip_from_instance(self, context, instance_id, address): + """Removes a fixed ip from an instance from specified network.""" + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + for fixed_ip in fixed_ips: + if fixed_ip['address'] == address: + self.deallocate_fixed_ip(context, address) + return + raise exception.FixedIpNotFoundForSpecificInstance( + instance_id=instance_id, ip=address) + + def allocate_fixed_ip(self, context, instance_id, network, **kwargs): """Gets a fixed ip from the pool.""" # TODO(vish): when this 
is called by compute, we can associate compute # with a network, or a cluster of computes with a network # and use that network here with a method like # network_get_by_compute_host - network_ref = self.db.network_get_by_bridge(context.elevated(), - FLAGS.flat_network_bridge) address = self.db.fixed_ip_associate_pool(context.elevated(), - network_ref['id'], + network['id'], instance_id) - self.db.fixed_ip_update(context, address, {'allocated': True}) + vif = self.db.virtual_interface_get_by_instance_and_network(context, + instance_id, + network['id']) + values = {'allocated': True, + 'virtual_interface_id': vif['id']} + self.db.fixed_ip_update(context, address, values) + self._setup_network(context, network) return address - def deallocate_fixed_ip(self, context, address, *args, **kwargs): + def deallocate_fixed_ip(self, context, address, **kwargs): """Returns a fixed ip to the pool.""" - self.db.fixed_ip_update(context, address, {'allocated': False}) - self.db.fixed_ip_disassociate(context.elevated(), address) - - def setup_fixed_ip(self, context, address): - """Sets up rules for fixed ip.""" - raise NotImplementedError() - - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a network.""" - raise NotImplementedError() - - def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts.""" - raise NotImplementedError() - - def allocate_floating_ip(self, context, project_id): - """Gets an floating ip from the pool.""" - # TODO(vish): add floating ips through manage command - return self.db.floating_ip_allocate_address(context, - self.host, - project_id) - - def associate_floating_ip(self, context, floating_address, fixed_address): - """Associates an floating ip to a fixed ip.""" - self.db.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address) - self.driver.bind_floating_ip(floating_address) - self.driver.ensure_floating_forward(floating_address, 
fixed_address) + self.db.fixed_ip_update(context, address, + {'allocated': False, + 'virtual_interface_id': None}) - def disassociate_floating_ip(self, context, floating_address): - """Disassociates a floating ip.""" - fixed_address = self.db.floating_ip_disassociate(context, - floating_address) - self.driver.unbind_floating_ip(floating_address) - self.driver.remove_floating_forward(floating_address, fixed_address) - - def deallocate_floating_ip(self, context, floating_address): - """Returns an floating ip to the pool.""" - self.db.floating_ip_deallocate(context, floating_address) - - def lease_fixed_ip(self, context, mac, address): + def lease_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is leased.""" - LOG.debug(_('Leasing IP %s'), address, context=context) - fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) - instance_ref = fixed_ip_ref['instance'] - if not instance_ref: + LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context) + fixed_ip = self.db.fixed_ip_get_by_address(context, address) + instance = fixed_ip['instance'] + if not instance: raise exception.Error(_('IP %s leased that is not associated') % address) - if instance_ref['mac_address'] != mac: - inst_addr = instance_ref['mac_address'] - raise exception.Error(_('IP %(address)s leased to bad mac' - ' %(inst_addr)s vs %(mac)s') % locals()) now = utils.utcnow() self.db.fixed_ip_update(context, - fixed_ip_ref['address'], + fixed_ip['address'], {'leased': True, 'updated_at': now}) - if not fixed_ip_ref['allocated']: - LOG.warn(_('IP %s leased that was already deallocated'), address, + if not fixed_ip['allocated']: + LOG.warn(_('IP |%s| leased that isn\'t allocated'), address, context=context) - def release_fixed_ip(self, context, mac, address): + def release_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is released.""" - LOG.debug(_('Releasing IP %s'), address, context=context) - fixed_ip_ref = self.db.fixed_ip_get_by_address(context, 
address) - instance_ref = fixed_ip_ref['instance'] - if not instance_ref: + LOG.debug(_('Released IP |%(address)s|'), locals(), context=context) + fixed_ip = self.db.fixed_ip_get_by_address(context, address) + instance = fixed_ip['instance'] + if not instance: raise exception.Error(_('IP %s released that is not associated') % address) - if instance_ref['mac_address'] != mac: - inst_addr = instance_ref['mac_address'] - raise exception.Error(_('IP %(address)s released from bad mac' - ' %(inst_addr)s vs %(mac)s') % locals()) - if not fixed_ip_ref['leased']: + if not fixed_ip['leased']: LOG.warn(_('IP %s released that was not leased'), address, context=context) self.db.fixed_ip_update(context, - fixed_ip_ref['address'], + fixed_ip['address'], {'leased': False}) - if not fixed_ip_ref['allocated']: + if not fixed_ip['allocated']: self.db.fixed_ip_disassociate(context, address) # NOTE(vish): dhcp server isn't updated until next setup, this # means there will stale entries in the conf file # the code below will update the file if necessary if FLAGS.update_dhcp_on_disassociate: network_ref = self.db.fixed_ip_get_network(context, address) - self.driver.update_dhcp(context, network_ref['id']) - - def get_network_host(self, context): - """Get the network host for the current context.""" - network_ref = self.db.network_get_by_bridge(context, - FLAGS.flat_network_bridge) - # NOTE(vish): If the network has no host, use the network_host flag. - # This could eventually be a a db lookup of some sort, but - # a flag is easy to handle for now. 
- host = network_ref['host'] - if not host: - topic = self.db.queue_get_for(context, - FLAGS.network_topic, - FLAGS.network_host) - if FLAGS.fake_call: - return self.set_network_host(context, network_ref['id']) - host = rpc.call(context, - FLAGS.network_topic, - {'method': 'set_network_host', - 'args': {'network_id': network_ref['id']}}) - return host + self._setup_network(context, network_ref) - def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, gateway_v6, label, *args, **kwargs): + def create_networks(self, context, label, cidr, multi_host, num_networks, + network_size, cidr_v6, gateway_v6, bridge, + bridge_interface, dns1=None, dns2=None, **kwargs): """Create networks based on parameters.""" fixed_net = netaddr.IPNetwork(cidr) fixed_net_v6 = netaddr.IPNetwork(cidr_v6) significant_bits_v6 = 64 network_size_v6 = 1 << 64 - count = 1 for index in range(num_networks): start = index * network_size start_v6 = index * network_size_v6 @@ -306,20 +624,22 @@ class NetworkManager(manager.SchedulerDependentManager): cidr = '%s/%s' % (fixed_net[start], significant_bits) project_net = netaddr.IPNetwork(cidr) net = {} - net['bridge'] = FLAGS.flat_network_bridge - net['dns'] = FLAGS.flat_network_dns + net['bridge'] = bridge + net['bridge_interface'] = bridge_interface + net['dns1'] = dns1 + net['dns2'] = dns2 net['cidr'] = cidr + net['multi_host'] = multi_host net['netmask'] = str(project_net.netmask) - net['gateway'] = str(list(project_net)[1]) + net['gateway'] = str(project_net[1]) net['broadcast'] = str(project_net.broadcast) - net['dhcp_start'] = str(list(project_net)[2]) + net['dhcp_start'] = str(project_net[2]) if num_networks > 1: - net['label'] = '%s_%d' % (label, count) + net['label'] = '%s_%d' % (label, index) else: net['label'] = label - count += 1 - if(FLAGS.use_ipv6): + if FLAGS.use_ipv6: cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 @@ -328,16 +648,34 @@ class 
NetworkManager(manager.SchedulerDependentManager): if gateway_v6: # use a pre-defined gateway if one is provided - net['gateway_v6'] = str(list(gateway_v6)[1]) + net['gateway_v6'] = str(gateway_v6) else: - net['gateway_v6'] = str(list(project_net_v6)[1]) + net['gateway_v6'] = str(project_net_v6[1]) net['netmask_v6'] = str(project_net_v6._prefixlen) - network_ref = self.db.network_create_safe(context, net) - - if network_ref: - self._create_fixed_ips(context, network_ref['id']) + if kwargs.get('vpn', False): + # this bit here is for vlan-manager + del net['dns1'] + del net['dns2'] + vlan = kwargs['vlan_start'] + index + net['vpn_private_address'] = str(project_net[2]) + net['dhcp_start'] = str(project_net[3]) + net['vlan'] = vlan + net['bridge'] = 'br%s' % vlan + + # NOTE(vish): This makes ports unique accross the cloud, a more + # robust solution would be to make them uniq per ip + net['vpn_public_port'] = kwargs['vpn_start'] + index + + # None if network with cidr or cidr_v6 already exists + network = self.db.network_create_safe(context, net) + + if network: + self._create_fixed_ips(context, network['id']) + else: + raise ValueError(_('Network with cidr %s already exists') % + cidr) @property def _bottom_reserved_ips(self): # pylint: disable=R0201 @@ -351,12 +689,12 @@ class NetworkManager(manager.SchedulerDependentManager): def _create_fixed_ips(self, context, network_id): """Create all fixed ips for network.""" - network_ref = self.db.network_get(context, network_id) + network = self.db.network_get(context, network_id) # NOTE(vish): Should these be properties of the network as opposed # to properties of the manager class? 
bottom_reserved = self._bottom_reserved_ips top_reserved = self._top_reserved_ips - project_net = netaddr.IPNetwork(network_ref['cidr']) + project_net = netaddr.IPNetwork(network['cidr']) num_ips = len(project_net) for index in range(num_ips): address = str(project_net[index]) @@ -368,6 +706,15 @@ class NetworkManager(manager.SchedulerDependentManager): 'address': address, 'reserved': reserved}) + def _allocate_fixed_ips(self, context, instance_id, host, networks, + **kwargs): + """Calls allocate_fixed_ip once for each network.""" + raise NotImplementedError() + + def _setup_network(self, context, network_ref): + """Sets up network on this host.""" + raise NotImplementedError() + class FlatManager(NetworkManager): """Basic network where no vlans are used. @@ -399,93 +746,64 @@ class FlatManager(NetworkManager): timeout_fixed_ips = False - def init_host(self): - """Do any initialization for a standalone service.""" - #Fix for bug 723298 - do not call init_host on superclass - #Following code has been copied for NetworkManager.init_host - ctxt = context.get_admin_context() - for network in self.db.host_get_networks(ctxt, self.host): - self._on_set_network_host(ctxt, network['id']) + def _allocate_fixed_ips(self, context, instance_id, host, networks, + **kwargs): + """Calls allocate_fixed_ip once for each network.""" + for network in networks: + self.allocate_fixed_ip(context, instance_id, network) - def setup_compute_network(self, context, instance_id): - """Network is created manually.""" - pass + def deallocate_fixed_ip(self, context, address, **kwargs): + """Returns a fixed ip to the pool.""" + super(FlatManager, self).deallocate_fixed_ip(context, address, + **kwargs) + self.db.fixed_ip_disassociate(context, address) - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a network.""" + def _setup_network(self, context, network_ref): + """Setup Network on this host.""" net = {} net['injected'] = FLAGS.flat_injected - 
net['dns'] = FLAGS.flat_network_dns - self.db.network_update(context, network_id, net) - - def allocate_floating_ip(self, context, project_id): - #Fix for bug 723298 - raise NotImplementedError() - - def associate_floating_ip(self, context, floating_address, fixed_address): - #Fix for bug 723298 - raise NotImplementedError() - - def disassociate_floating_ip(self, context, floating_address): - #Fix for bug 723298 - raise NotImplementedError() - - def deallocate_floating_ip(self, context, floating_address): - #Fix for bug 723298 - raise NotImplementedError() + self.db.network_update(context, network_ref['id'], net) -class FlatDHCPManager(NetworkManager): +class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): """Flat networking with dhcp. FlatDHCPManager will start up one dhcp server to give out addresses. - It never injects network settings into the guest. Otherwise it behaves - like FlatDHCPManager. + It never injects network settings into the guest. It also manages bridges. + Otherwise it behaves like FlatManager. """ - def init_host(self): - """Do any initialization for a standalone service.""" - super(FlatDHCPManager, self).init_host() - self.driver.metadata_forward() + SHOULD_CREATE_BRIDGE = True - def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts.""" - network_ref = db.network_get_by_instance(context, instance_id) - self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.flat_interface) + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. 
+ """ + self.driver.init_host() + self.driver.ensure_metadata_ip() - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): - """Setup dhcp for this network.""" - address = super(FlatDHCPManager, self).allocate_fixed_ip(context, - instance_id, - *args, - **kwargs) - network_ref = db.fixed_ip_get_network(context, address) - if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref['id']) - return address + super(FlatDHCPManager, self).init_host() + self.init_host_floating_ips() - def deallocate_fixed_ip(self, context, address, *args, **kwargs): - """Returns a fixed ip to the pool.""" - self.db.fixed_ip_update(context, address, {'allocated': False}) + self.driver.metadata_forward() - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a project.""" - net = {} - net['dhcp_start'] = FLAGS.flat_network_dhcp_start - self.db.network_update(context, network_id, net) - network_ref = db.network_get(context, network_id) + def _setup_network(self, context, network_ref): + """Sets up network on this host.""" + network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.flat_interface, + network_ref['bridge_interface'], network_ref) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_id) + self.driver.update_dhcp(context, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_id) + self.driver.update_ra(context, network_ref) + gateway = utils.get_my_linklocal(network_ref['bridge']) + self.db.network_update(context, network_ref['id'], + {'gateway_v6': gateway}) -class VlanManager(NetworkManager): +class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): """Vlan network with dhcp. VlanManager is the most complicated. 
It will create a host-managed @@ -500,141 +818,98 @@ class VlanManager(NetworkManager): """ + SHOULD_CREATE_BRIDGE = True + SHOULD_CREATE_VLAN = True + def init_host(self): - """Do any initialization for a standalone service.""" - super(VlanManager, self).init_host() + """Do any initialization that needs to be run if this is a + standalone service. + """ + + self.driver.init_host() + self.driver.ensure_metadata_ip() + + NetworkManager.init_host(self) + self.init_host_floating_ips() + self.driver.metadata_forward() - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): + def allocate_fixed_ip(self, context, instance_id, network, **kwargs): """Gets a fixed ip from the pool.""" - # TODO(vish): This should probably be getting project_id from - # the instance, but it is another trip to the db. - # Perhaps this method should take an instance_ref. - ctxt = context.elevated() - network_ref = self.db.project_get_network(ctxt, - context.project_id) if kwargs.get('vpn', None): - address = network_ref['vpn_private_address'] - self.db.fixed_ip_associate(ctxt, + address = network['vpn_private_address'] + self.db.fixed_ip_associate(context, address, instance_id) else: - address = self.db.fixed_ip_associate_pool(ctxt, - network_ref['id'], + address = self.db.fixed_ip_associate_pool(context, + network['id'], instance_id) - self.db.fixed_ip_update(context, address, {'allocated': True}) - if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref['id']) + + vif = self.db.virtual_interface_get_by_instance_and_network(context, + instance_id, + network['id']) + values = {'allocated': True, + 'virtual_interface_id': vif['id']} + self.db.fixed_ip_update(context, address, values) + self._setup_network(context, network) return address - def deallocate_fixed_ip(self, context, address, *args, **kwargs): - """Returns a fixed ip to the pool.""" - self.db.fixed_ip_update(context, address, {'allocated': False}) + def add_network_to_project(self, context, project_id): 
+ """Force adds another network to a project.""" + self.db.network_associate(context, project_id, force=True) - def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts.""" - network_ref = db.network_get_by_instance(context, instance_id) - self.driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge']) + def _get_networks_for_instance(self, context, instance_id, project_id): + """Determine which networks an instance should connect to.""" + # get networks associated with project + return self.db.project_get_networks(context, project_id) - def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, vlan_start, vpn_start, **kwargs): + def create_networks(self, context, **kwargs): """Create networks based on parameters.""" # Check that num_networks + vlan_start is not > 4094, fixes lp708025 - if num_networks + vlan_start > 4094: + if kwargs['num_networks'] + kwargs['vlan_start'] > 4094: raise ValueError(_('The sum between the number of networks and' ' the vlan start cannot be greater' ' than 4094')) - fixed_net = netaddr.IPNetwork(cidr) - if len(fixed_net) < num_networks * network_size: + # check that num networks and network size fits in fixed_net + fixed_net = netaddr.IPNetwork(kwargs['cidr']) + if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']: raise ValueError(_('The network range is not big enough to fit ' - '%(num_networks)s. Network size is %(network_size)s' % - locals())) + '%(num_networks)s. 
Network size is %(network_size)s') % + kwargs) - fixed_net_v6 = netaddr.IPNetwork(cidr_v6) - network_size_v6 = 1 << 64 - significant_bits_v6 = 64 - for index in range(num_networks): - vlan = vlan_start + index - start = index * network_size - start_v6 = index * network_size_v6 - significant_bits = 32 - int(math.log(network_size, 2)) - cidr = "%s/%s" % (fixed_net[start], significant_bits) - project_net = netaddr.IPNetwork(cidr) - net = {} - net['cidr'] = cidr - net['netmask'] = str(project_net.netmask) - net['gateway'] = str(list(project_net)[1]) - net['broadcast'] = str(project_net.broadcast) - net['vpn_private_address'] = str(list(project_net)[2]) - net['dhcp_start'] = str(list(project_net)[3]) - net['vlan'] = vlan - net['bridge'] = 'br%s' % vlan - if(FLAGS.use_ipv6): - cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], - significant_bits_v6) - net['cidr_v6'] = cidr_v6 - - # NOTE(vish): This makes ports unique accross the cloud, a more - # robust solution would be to make them unique per ip - net['vpn_public_port'] = vpn_start + index - network_ref = None - try: - network_ref = db.network_get_by_cidr(context, cidr) - except exception.NotFound: - pass - - if network_ref is not None: - raise ValueError(_('Network with cidr %s already exists' % - cidr)) - - network_ref = self.db.network_create_safe(context, net) - if network_ref: - self._create_fixed_ips(context, network_ref['id']) - - def get_network_host(self, context): - """Get the network for the current context.""" - network_ref = self.db.project_get_network(context.elevated(), - context.project_id) - # NOTE(vish): If the network has no host, do a call to get an - # available host. This should be changed to go through - # the scheduler at some point. 
- host = network_ref['host'] - if not host: - if FLAGS.fake_call: - return self.set_network_host(context, network_ref['id']) - host = rpc.call(context, - FLAGS.network_topic, - {'method': 'set_network_host', - 'args': {'network_id': network_ref['id']}}) - - return host + NetworkManager.create_networks(self, context, vpn=True, **kwargs) - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a network.""" - network_ref = self.db.network_get(context, network_id) + def _setup_network(self, context, network_ref): + """Sets up network on this host.""" if not network_ref['vpn_public_address']: net = {} address = FLAGS.vpn_ip net['vpn_public_address'] = address - db.network_update(context, network_id, net) + network_ref = db.network_update(context, network_ref['id'], net) else: address = network_ref['vpn_public_address'] + network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge'], + network_ref['bridge_interface'], network_ref) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. 
- if address == FLAGS.vpn_ip: + if address == FLAGS.vpn_ip and hasattr(self.driver, + "ensure_vlan_forward"): self.driver.ensure_vlan_forward(FLAGS.vpn_ip, network_ref['vpn_public_port'], network_ref['vpn_private_address']) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_id) + self.driver.update_dhcp(context, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_id) + self.driver.update_ra(context, network_ref) + gateway = utils.get_my_linklocal(network_ref['bridge']) + self.db.network_update(context, network_ref['id'], + {'gateway_v6': gateway}) @property def _bottom_reserved_ips(self): diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py deleted file mode 100644 index 04210c011..000000000 --- a/nova/network/vmwareapi_net.py +++ /dev/null @@ -1,82 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Implements vlans for vmwareapi.""" - -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova.virt.vmwareapi_conn import VMWareAPISession -from nova.virt.vmwareapi import network_utils - - -LOG = logging.getLogger("nova.network.vmwareapi_net") - - -FLAGS = flags.FLAGS -FLAGS['vlan_interface'].SetDefault('vmnic0') - - -def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): - """Create a vlan and bridge unless they already exist.""" - # Open vmwareapi session - host_ip = FLAGS.vmwareapi_host_ip - host_username = FLAGS.vmwareapi_host_username - host_password = FLAGS.vmwareapi_host_password - if not host_ip or host_username is None or host_password is None: - raise Exception(_('Must specify vmwareapi_host_ip, ' - 'vmwareapi_host_username ' - 'and vmwareapi_host_password to use ' - 'connection_type=vmwareapi')) - session = VMWareAPISession(host_ip, host_username, host_password, - FLAGS.vmwareapi_api_retry_count) - vlan_interface = FLAGS.vlan_interface - # Check if the vlan_interface physical network adapter exists on the host - if not network_utils.check_if_vlan_interface_exists(session, - vlan_interface): - raise exception.NetworkAdapterNotFound(adapter=vlan_interface) - - # Get the vSwitch associated with the Physical Adapter - vswitch_associated = network_utils.get_vswitch_for_vlan_interface( - session, vlan_interface) - if vswitch_associated is None: - raise exception.SwicthNotFoundForNetworkAdapter(adapter=vlan_interface) - # Check whether bridge already exists and retrieve the the ref of the - # network whose name_label is "bridge" - network_ref = network_utils.get_network_with_the_name(session, bridge) - if network_ref is None: - # Create a port group on the vSwitch associated with the vlan_interface - # corresponding physical network adapter on the ESX host - network_utils.create_port_group(session, bridge, vswitch_associated, - vlan_num) - else: - # Get the vlan id and 
vswitch corresponding to the port group - pg_vlanid, pg_vswitch = \ - network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge) - - # Check if the vswitch associated is proper - if pg_vswitch != vswitch_associated: - raise exception.InvalidVLANPortGroup(bridge=bridge, - expected=vswitch_associated, - actual=pg_vswitch) - - # Check if the vlan id is proper for the port group - if pg_vlanid != vlan_num: - raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, - pgroup=pg_vlanid) diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py deleted file mode 100644 index af295a4f8..000000000 --- a/nova/network/xenapi_net.py +++ /dev/null @@ -1,87 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Implements vlans, bridges, and iptables rules using linux utilities.""" - -import os - -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova.virt import xenapi_conn -from nova.virt.xenapi import network_utils - - -LOG = logging.getLogger("nova.xenapi_net") - - -FLAGS = flags.FLAGS - - -def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): - """Create a vlan and bridge unless they already exist.""" - # Open xenapi session - LOG.debug('ENTERING ensure_vlan_bridge in xenapi net') - url = FLAGS.xenapi_connection_url - username = FLAGS.xenapi_connection_username - password = FLAGS.xenapi_connection_password - session = xenapi_conn.XenAPISession(url, username, password) - # Check whether bridge already exists - # Retrieve network whose name_label is "bridge" - network_ref = network_utils.NetworkHelper.find_network_with_name_label( - session, - bridge) - if network_ref is None: - # If bridge does not exists - # 1 - create network - description = 'network for nova bridge %s' % bridge - network_rec = {'name_label': bridge, - 'name_description': description, - 'other_config': {}} - network_ref = session.call_xenapi('network.create', network_rec) - # 2 - find PIF for VLAN - # NOTE(salvatore-orlando): using double quotes inside single quotes - # as xapi filter only support tokens in double quotes - expr = 'field "device" = "%s" and \ - field "VLAN" = "-1"' % FLAGS.vlan_interface - pifs = session.call_xenapi('PIF.get_all_records_where', expr) - pif_ref = None - # Multiple PIF are ok: we are dealing with a pool - if len(pifs) == 0: - raise Exception( - _('Found no PIF for device %s') % FLAGS.vlan_interface) - # 3 - create vlan for network - for pif_ref in pifs.keys(): - session.call_xenapi('VLAN.create', - pif_ref, - str(vlan_num), - network_ref) - else: - # Check VLAN tag is appropriate - network_rec = session.call_xenapi('network.get_record', network_ref) - # Retrieve PIFs from 
network - for pif_ref in network_rec['PIFs']: - # Retrieve VLAN from PIF - pif_rec = session.call_xenapi('PIF.get_record', pif_ref) - pif_vlan = int(pif_rec['VLAN']) - # Raise an exception if VLAN != vlan_num - if pif_vlan != vlan_num: - raise Exception(_("PIF %(pif_rec['uuid'])s for network " - "%(bridge)s has VLAN id %(pif_vlan)d. " - "Expected %(vlan_num)d") % locals()) diff --git a/nova/notifier/api.py b/nova/notifier/api.py index d49517c8b..98969fd3e 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -17,7 +17,9 @@ import uuid from nova import flags from nova import utils +from nova import log as logging +LOG = logging.getLogger('nova.exception') FLAGS = flags.FLAGS @@ -37,6 +39,12 @@ class BadPriorityException(Exception): pass +def publisher_id(service, host=None): + if not host: + host = FLAGS.host + return "%s.%s" % (service, host) + + def notify(publisher_id, event_type, priority, payload): """ Sends a notification using the specified driver @@ -79,4 +87,8 @@ def notify(publisher_id, event_type, priority, payload): priority=priority, payload=payload, timestamp=str(utils.utcnow())) - driver.notify(msg) + try: + driver.notify(msg) + except Exception, e: + LOG.exception(_("Problem '%(e)s' attempting to " + "send to notification system." % locals())) diff --git a/nova/notifier/test_notifier.py b/nova/notifier/test_notifier.py new file mode 100644 index 000000000..d43f43e48 --- /dev/null +++ b/nova/notifier/test_notifier.py @@ -0,0 +1,28 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from nova import flags +from nova import log as logging + +FLAGS = flags.FLAGS + +NOTIFICATIONS = [] + + +def notify(message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/nova/rpc.py b/nova/rpc.py index 2e78a31e7..e2771ca88 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -219,7 +219,7 @@ class AdapterConsumer(Consumer): return self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) - @exception.wrap_exception + @exception.wrap_exception() def _process_data(self, msg_id, ctxt, method, args): """Thread that maigcally looks for a method on the proxy object and calls it. @@ -275,6 +275,11 @@ class FanoutAdapterConsumer(AdapterConsumer): unique = uuid.uuid4().hex self.queue = '%s_fanout_%s' % (topic, unique) self.durable = False + # Fanout creates unique queue names, so we should auto-remove + # them when done, so they're not left around on restart. + # Also, we're the only one that should be consuming. exclusive + # implies auto_delete, so we'll just set that.. 
+ self.exclusive = True LOG.info(_('Created "%(exchange)s" fanout exchange ' 'with "%(key)s" routing key'), dict(exchange=self.exchange, key=self.routing_key)) @@ -355,6 +360,7 @@ class FanoutPublisher(Publisher): self.exchange = '%s_fanout' % topic self.queue = '%s_fanout' % topic self.durable = False + self.auto_delete = True LOG.info(_('Creating "%(exchange)s" fanout exchange'), dict(exchange=self.exchange)) super(FanoutPublisher, self).__init__(connection=connection) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 1bb047e2e..137b671c0 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -51,6 +51,11 @@ def _call_scheduler(method, context, params=None): return rpc.call(context, queue, kwargs) +def get_host_list(context): + """Return a list of hosts associated with this zone.""" + return _call_scheduler('get_host_list', context) + + def get_zone_list(context): """Return a list of zones assoicated with this zone.""" items = _call_scheduler('get_zone_list', context) @@ -114,7 +119,8 @@ def _process(func, zone): def call_zone_method(context, method_name, errors_to_ignore=None, - novaclient_collection_name='zones', *args, **kwargs): + novaclient_collection_name='zones', zones=None, + *args, **kwargs): """Returns a list of (zone, call_result) objects.""" if not isinstance(errors_to_ignore, (list, tuple)): # This will also handle the default None @@ -122,7 +128,9 @@ def call_zone_method(context, method_name, errors_to_ignore=None, pool = greenpool.GreenPool() results = [] - for zone in db.zone_get_all(context): + if zones is None: + zones = db.zone_get_all(context) + for zone in zones: try: nova = novaclient.OpenStack(zone.username, zone.password, None, zone.api_url) @@ -162,32 +170,53 @@ def child_zone_helper(zone_list, func): _wrap_method(_process, func), zone_list)] -def _issue_novaclient_command(nova, zone, collection, method_name, item_id): +def _issue_novaclient_command(nova, zone, collection, + method_name, *args, **kwargs): """Use 
novaclient to issue command to a single child zone. - One of these will be run in parallel for each child zone.""" + One of these will be run in parallel for each child zone. + """ manager = getattr(nova, collection) - result = None - try: + + # NOTE(comstud): This is not ideal, but we have to do this based on + # how novaclient is implemented right now. + # 'find' is special cased as novaclient requires kwargs for it to + # filter on a 'get_all'. + # Every other method first needs to do a 'get' on the first argument + # passed, which should be a UUID. If it's 'get' itself that we want, + # we just return the result. Otherwise, we next call the real method + # that's wanted... passing other arguments that may or may not exist. + if method_name in ['find', 'findall']: try: - result = manager.get(int(item_id)) - except ValueError, e: - result = manager.find(name=item_id) + return getattr(manager, method_name)(**kwargs) + except novaclient.NotFound: + url = zone.api_url + LOG.debug(_("%(collection)s.%(method_name)s didn't find " + "anything matching '%(kwargs)s' on '%(url)s'" % + locals())) + return None + + args = list(args) + # pop off the UUID to look up + item = args.pop(0) + try: + result = manager.get(item) except novaclient.NotFound: url = zone.api_url - LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" % + LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" % locals())) return None - if method_name.lower() not in ['get', 'find']: - result = getattr(result, method_name)() + if method_name.lower() != 'get': + # if we're doing something other than 'get', call it passing args. 
+ result = getattr(result, method_name)(*args, **kwargs) return result -def wrap_novaclient_function(f, collection, method_name, item_id): - """Appends collection, method_name and item_id to the incoming +def wrap_novaclient_function(f, collection, method_name, *args, **kwargs): + """Appends collection, method_name and arguments to the incoming (nova, zone) call from child_zone_helper.""" def inner(nova, zone): - return f(nova, zone, collection, method_name, item_id) + return f(nova, zone, collection, method_name, *args, **kwargs) return inner @@ -220,7 +249,7 @@ class reroute_compute(object): the wrapped method. (This ensures that zone-local code can continue to use integer IDs). - 4. If the item was not found, we delgate the call to a child zone + 4. If the item was not found, we delegate the call to a child zone using the UUID. """ def __init__(self, method_name): diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 0b257c5d8..1bfa7740a 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -31,6 +31,7 @@ from nova import rpc from nova import utils from nova.compute import power_state + FLAGS = flags.FLAGS flags.DEFINE_integer('service_down_time', 60, 'maximum time since last checkin for up service') @@ -129,8 +130,7 @@ class Scheduler(object): # Checking instance is running. if (power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']): - ec2_id = instance_ref['hostname'] - raise exception.InstanceNotRunning(instance_id=ec2_id) + raise exception.InstanceNotRunning(instance_id=instance_ref['id']) # Checing volume node is running when any volumes are mounted # to the instance. @@ -168,9 +168,9 @@ class Scheduler(object): # and dest is not same. 
src = instance_ref['host'] if dest == src: - ec2_id = instance_ref['hostname'] - raise exception.UnableToMigrateToSelf(instance_id=ec2_id, - host=dest) + raise exception.UnableToMigrateToSelf( + instance_id=instance_ref['id'], + host=dest) # Checking dst host still has enough capacities. self.assert_compute_node_has_enough_resources(context, @@ -245,7 +245,7 @@ class Scheduler(object): """ # Getting instance information - ec2_id = instance_ref['hostname'] + hostname = instance_ref['hostname'] # Getting host information service_refs = db.service_get_all_compute_by_host(context, dest) @@ -256,8 +256,9 @@ class Scheduler(object): mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] if mem_avail <= mem_inst: - reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s " - "(host:%(mem_avail)s <= instance:%(mem_inst)s)") + reason = _("Unable to migrate %(hostname)s to destination: " + "%(dest)s (host:%(mem_avail)s <= instance:" + "%(mem_inst)s)") raise exception.MigrationError(reason=reason % locals()) def mounted_on_same_shared_storage(self, context, instance_ref, dest): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 5f61f9456..b7bbbbcb8 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -251,8 +251,7 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk], - ] + ['>=', '$compute.disk_available', required_disk]] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): @@ -329,8 +328,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): 'instance_type': <InstanceType dict>} """ - def filter_hosts(self, num, request_spec): + def filter_hosts(self, topic, request_spec, hosts=None): """Filter the full host list (from the ZoneManager)""" + filter_name = request_spec.get('filter', None) 
host_filter = choose_host_filter(filter_name) @@ -341,8 +341,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): name, query = host_filter.instance_type_to_filter(instance_type) return host_filter.filter_hosts(self.zone_manager, query) - def weigh_hosts(self, num, request_spec, hosts): + def weigh_hosts(self, topic, request_spec, hosts): """Derived classes must override this method and return a lists of hosts in [{weight, hostname}] format. """ - return [dict(weight=1, hostname=host) for host, caps in hosts] + return [dict(weight=1, hostname=hostname, capabilities=caps) + for hostname, caps in hosts] diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 629fe2e42..6f5eb66fd 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -48,25 +48,43 @@ def noop_cost_fn(host): return 1 -flags.DEFINE_integer('fill_first_cost_fn_weight', 1, +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') -def fill_first_cost_fn(host): +def compute_fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" hostname, caps = host - free_mem = caps['compute']['host_memory_free'] + free_mem = caps['host_memory_free'] return free_mem class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def get_cost_fns(self): + def __init__(self, *args, **kwargs): + self.cost_fns_cache = {} + super(LeastCostScheduler, self).__init__(*args, **kwargs) + + def get_cost_fns(self, topic): """Returns a list of tuples containing weights and cost functions to use for weighing hosts """ + + if topic in self.cost_fns_cache: + return self.cost_fns_cache[topic] + cost_fns = [] for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: + if '.' 
in cost_fn_str: + short_name = cost_fn_str.split('.')[-1] + else: + short_name = cost_fn_str + cost_fn_str = "%s.%s.%s" % ( + __name__, self.__class__.__name__, short_name) + + if not (short_name.startswith('%s_' % topic) or + short_name.startswith('noop')): + continue try: # NOTE(sirp): import_class is somewhat misnamed since it can @@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): cost_fns.append((weight, cost_fn)) + self.cost_fns_cache[topic] = cost_fns return cost_fns - def weigh_hosts(self, num, request_spec, hosts): + def weigh_hosts(self, topic, request_spec, hosts): """Returns a list of dictionaries of form: - [ {weight: weight, hostname: hostname} ]""" - - # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, caps in hosts] + [ {weight: weight, hostname: hostname, capabilities: capabs} ] + """ - cost_fns = self.get_cost_fns() + cost_fns = self.get_cost_fns(topic) costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) weighted = [] weight_log = [] - for cost, hostname in zip(costs, hostnames): + for cost, (hostname, caps) in zip(costs, hosts): weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) - weight_dict = dict(weight=cost, hostname=hostname) + weight_dict = dict(weight=cost, hostname=hostname, + capabilities=caps) weighted.append(weight_dict) LOG.debug(_("Weighted Costs => %s") % weight_log) @@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True): weighted_fns - list of weights and functions like: [(weight, objective-functions)] - Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts) + Returns an unsorted list of scores. 
To pair with hosts do: + zip(scores, hosts) """ # Table of form: # { domain1: [score1, score2, ..., scoreM] @@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True): domain_scores = [] for idx in sorted(score_table): elem_score = sum(score_table[idx]) - elem = domain[idx] domain_scores.append(elem_score) return domain_scores diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 6cb75aa8d..749d66cad 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -56,6 +56,10 @@ class SchedulerManager(manager.Manager): """Poll child zones periodically to get status.""" self.zone_manager.ping(context) + def get_host_list(self, context=None): + """Get a list of hosts from the ZoneManager.""" + return self.zone_manager.get_host_list() + def get_zone_list(self, context=None): """Get a list of zones from the ZoneManager.""" return self.zone_manager.get_zone_list() diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index e7bff2faa..c429fdfcc 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -33,6 +33,7 @@ from nova import flags from nova import log as logging from nova import rpc +from nova.compute import api as compute_api from nova.scheduler import api from nova.scheduler import driver @@ -48,14 +49,25 @@ class InvalidBlob(exception.NovaException): class ZoneAwareScheduler(driver.Scheduler): """Base class for creating Zone Aware Schedulers.""" - def _call_zone_method(self, context, method, specs): + def _call_zone_method(self, context, method, specs, zones): """Call novaclient zone method. 
Broken out for testing.""" - return api.call_zone_method(context, method, specs=specs) + return api.call_zone_method(context, method, specs=specs, zones=zones) - def _provision_resource_locally(self, context, item, instance_id, kwargs): + def _provision_resource_locally(self, context, build_plan_item, + request_spec, kwargs): """Create the requested resource in this Zone.""" - host = item['hostname'] + host = build_plan_item['hostname'] + base_options = request_spec['instance_properties'] + + # TODO(sandy): I guess someone needs to add block_device_mapping + # support at some point? Also, OS API has no concept of security + # groups. + instance = compute_api.API().create_db_entry_for_new_instance(context, + base_options, None, []) + + instance_id = instance['id'] kwargs['instance_id'] = instance_id + rpc.cast(context, db.queue_get_for(context, "compute", host), {"method": "run_instance", @@ -115,8 +127,8 @@ class ZoneAwareScheduler(driver.Scheduler): nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, child_blob, reservation_id=reservation_id) - def _provision_resource_from_blob(self, context, item, instance_id, - request_spec, kwargs): + def _provision_resource_from_blob(self, context, build_plan_item, + instance_id, request_spec, kwargs): """Create the requested resource locally or in a child zone based on what is stored in the zone blob info. @@ -132,12 +144,12 @@ class ZoneAwareScheduler(driver.Scheduler): request.""" host_info = None - if "blob" in item: + if "blob" in build_plan_item: # Request was passed in from above. Is it for us? - host_info = self._decrypt_blob(item['blob']) - elif "child_blob" in item: + host_info = self._decrypt_blob(build_plan_item['blob']) + elif "child_blob" in build_plan_item: # Our immediate child zone provided this info ... 
- host_info = item + host_info = build_plan_item if not host_info: raise InvalidBlob() @@ -147,19 +159,46 @@ class ZoneAwareScheduler(driver.Scheduler): self._ask_child_zone_to_create_instance(context, host_info, request_spec, kwargs) else: - self._provision_resource_locally(context, host_info, - instance_id, kwargs) + self._provision_resource_locally(context, host_info, request_spec, + kwargs) - def _provision_resource(self, context, item, instance_id, request_spec, - kwargs): + def _provision_resource(self, context, build_plan_item, instance_id, + request_spec, kwargs): """Create the requested resource in this Zone or a child zone.""" - if "hostname" in item: - self._provision_resource_locally(context, item, instance_id, - kwargs) + if "hostname" in build_plan_item: + self._provision_resource_locally(context, build_plan_item, + request_spec, kwargs) return - self._provision_resource_from_blob(context, item, instance_id, - request_spec, kwargs) + self._provision_resource_from_blob(context, build_plan_item, + instance_id, request_spec, kwargs) + + def _adjust_child_weights(self, child_results, zones): + """Apply the Scale and Offset values from the Zone definition + to adjust the weights returned from the child zones. Alters + child_results in place. 
+ """ + for zone_id, result in child_results: + if not result: + continue + + assert isinstance(zone_id, int) + + for zone_rec in zones: + if zone_rec['id'] != zone_id: + continue + + for item in result: + try: + offset = zone_rec['weight_offset'] + scale = zone_rec['weight_scale'] + raw_weight = item['weight'] + cooked_weight = offset + scale * raw_weight + item['weight'] = cooked_weight + item['raw_weight'] = raw_weight + except KeyError: + LOG.exception(_("Bad child zone scaling values " + "for Zone: %(zone_id)s") % locals()) def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): @@ -180,18 +219,22 @@ class ZoneAwareScheduler(driver.Scheduler): request_spec, kwargs) return None + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + # Create build plan and provision ... build_plan = self.select(context, request_spec) if not build_plan: raise driver.NoValidHost(_('No hosts were available')) - for num in xrange(request_spec['num_instances']): + for num in xrange(num_instances): if not build_plan: break - item = build_plan.pop(0) - self._provision_resource(context, item, instance_id, request_spec, - kwargs) + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) @@ -224,23 +267,43 @@ class ZoneAwareScheduler(driver.Scheduler): raise NotImplemented(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) - #TODO(sandy): how to infer this from OS API params? - num_instances = 1 - - # Filter local hosts based on requirements ... 
- host_list = self.filter_hosts(num_instances, request_spec) + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] - # TODO(sirp): weigh_hosts should also be a function of 'topic' or - # resources, so that we can apply different objective functions to it + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. + host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - weighted = self.weigh_hosts(num_instances, request_spec, host_list) + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) # Next, tack on the best weights from the child zones ... 
json_spec = json.dumps(request_spec) + all_zones = db.zone_get_all(context) child_results = self._call_zone_method(context, "select", - specs=json_spec) + specs=json_spec, zones=all_zones) + self._adjust_child_weights(child_results, all_zones) for child_zone, result in child_results: for weighting in result: # Remember the child_zone so we can get back to @@ -254,18 +317,65 @@ class ZoneAwareScheduler(driver.Scheduler): weighted.sort(key=operator.itemgetter('weight')) return weighted - def filter_hosts(self, num, request_spec): - """Derived classes must override this method and return - a list of hosts in [(hostname, capability_dict)] format. + def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. """ - # NOTE(sirp): The default logic is the equivalent to AllHostsFilter - service_states = self.zone_manager.service_states - return [(host, services) - for host, services in service_states.iteritems()] + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '<topic>_filter' function more appropriate. 
+ """ + + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no <topic>_filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True + + filter_func = getattr(self, '%s_filter' % topic, _default_filter) - def weigh_hosts(self, num, request_spec, hosts): + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): """Derived classes may override this to provide more sophisticated scheduling objectives """ # NOTE(sirp): The default logic is the same as the NoopCostFunction - return [dict(weight=1, hostname=host) for host, caps in hosts] + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 
'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index ba7403c15..efdac06e1 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -115,6 +115,18 @@ class ZoneManager(object): """Return the list of zones we know about.""" return [zone.to_dict() for zone in self.zone_states.values()] + def get_host_list(self): + """Returns a list of dicts for each host that the Zone Manager + knows about. Each dict contains the host_name and the service + for that host. + """ + all_hosts = self.service_states.keys() + ret = [] + for host in self.service_states: + for svc in self.service_states[host]: + ret.append({"service": svc, "host_name": host}) + return ret + def get_zone_capabilities(self, context): """Roll up all the individual host info to generic 'service' capabilities. Each capability is aggregated into @@ -125,15 +137,30 @@ class ZoneManager(object): # But it's likely to change once we understand what the Best-Match # code will need better. combined = {} # { <service>_<cap> : (min, max), ... 
} + stale_host_services = {} # { host1 : [svc1, svc2], host2 :[svc1]} for host, host_dict in hosts_dict.iteritems(): for service_name, service_dict in host_dict.iteritems(): + if not service_dict.get("enabled", True): + # Service is disabled; do no include it + continue + + #Check if the service capabilities became stale + if self.host_service_caps_stale(host, service_name): + if host not in stale_host_services: + stale_host_services[host] = [] # Adding host key once + stale_host_services[host].append(service_name) + continue for cap, value in service_dict.iteritems(): + if cap == "timestamp": # Timestamp is not needed + continue key = "%s_%s" % (service_name, cap) min_value, max_value = combined.get(key, (value, value)) min_value = min(min_value, value) max_value = max(max_value, value) combined[key] = (min_value, max_value) + # Delete the expired host services + self.delete_expired_host_services(stale_host_services) return combined def _refresh_from_db(self, context): @@ -172,5 +199,24 @@ class ZoneManager(object): logging.debug(_("Received %(service_name)s service update from " "%(host)s: %(capabilities)s") % locals()) service_caps = self.service_states.get(host, {}) + capabilities["timestamp"] = utils.utcnow() # Reported time service_caps[service_name] = capabilities self.service_states[host] = service_caps + + def host_service_caps_stale(self, host, service): + """Check if host service capabilites are not recent enough.""" + allowed_time_diff = FLAGS.periodic_interval * 3 + caps = self.service_states[host][service] + if (utils.utcnow() - caps["timestamp"]) <= \ + datetime.timedelta(seconds=allowed_time_diff): + return False + return True + + def delete_expired_host_services(self, host_services_dict): + """Delete all the inactive host services information.""" + for host, services in host_services_dict.iteritems(): + service_caps = self.service_states[host] + for service in services: + del service_caps[service] + if len(service_caps) == 0: # Delete host if no 
services + del self.service_states[host] diff --git a/nova/test.py b/nova/test.py index ab1eaf5fd..9790b0aa1 100644 --- a/nova/test.py +++ b/nova/test.py @@ -30,11 +30,15 @@ import uuid import unittest import mox +import nose.plugins.skip +import nova.image.fake +import shutil import stubout from eventlet import greenthread from nova import fakerabbit from nova import flags +from nova import log from nova import rpc from nova import utils from nova import service @@ -47,6 +51,22 @@ flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite', flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') +LOG = log.getLogger('nova.tests') + + +class skip_test(object): + """Decorator that skips a test.""" + def __init__(self, msg): + self.message = msg + + def __call__(self, func): + def _skipper(*args, **kw): + """Wrapped skipper function.""" + raise nose.SkipTest(self.message) + _skipper.__name__ = func.__name__ + _skipper.__doc__ = func.__doc__ + return _skipper + def skip_if_fake(func): """Decorator that skips a test if running in fake mode.""" @@ -100,6 +120,9 @@ class TestCase(unittest.TestCase): if hasattr(fake.FakeConnection, '_instance'): del fake.FakeConnection._instance + if FLAGS.image_service == 'nova.image.fake.FakeImageService': + nova.image.fake.FakeImageService_reset() + # Reset any overriden flags self.reset_flags() @@ -229,3 +252,15 @@ class TestCase(unittest.TestCase): for d1, d2 in zip(L1, L2): self.assertDictMatch(d1, d2, approx_equal=approx_equal, tolerance=tolerance) + + def assertSubDictMatch(self, sub_dict, super_dict): + """Assert a sub_dict is subset of super_dict.""" + self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) + for k, sub_value in sub_dict.items(): + super_value = super_dict[k] + if isinstance(sub_value, dict): + self.assertSubDictMatch(sub_value, super_value) + elif 'DONTCARE' in (sub_value, super_value): + continue + else: + self.assertEqual(sub_value, super_value) diff --git 
a/nova/tests/__init__.py b/nova/tests/__init__.py index 5e0cb718e..720d5b0e6 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -42,6 +42,7 @@ def setup(): from nova import context from nova import flags + from nova import db from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags @@ -53,14 +54,23 @@ def setup(): return migration.db_sync() ctxt = context.get_admin_context() - network_manager.VlanManager().create_networks(ctxt, - FLAGS.fixed_range, - FLAGS.num_networks, - FLAGS.network_size, - FLAGS.fixed_range_v6, - FLAGS.vlan_start, - FLAGS.vpn_start, - ) + network = network_manager.VlanManager() + bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface + network.create_networks(ctxt, + label='test', + cidr=FLAGS.fixed_range, + multi_host=FLAGS.multi_host, + num_networks=FLAGS.num_networks, + network_size=FLAGS.network_size, + cidr_v6=FLAGS.fixed_range_v6, + gateway_v6=FLAGS.gateway_v6, + bridge=FLAGS.flat_network_bridge, + bridge_interface=bridge_interface, + vpn_start=FLAGS.vpn_start, + vlan_start=FLAGS.vlan_start, + dns1=FLAGS.flat_network_dns) + for net in db.network_get_all(ctxt): + network.set_network_host(ctxt, net) cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) shutil.copyfile(testdb, cleandb) diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py index e69de29bb..6dab802f2 100644 --- a/nova/tests/api/__init__.py +++ b/nova/tests/api/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Openstack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index bac7181f7..bfb424afe 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -15,6 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * + import webob.dec from nova import test diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index de1eb2f53..de006d088 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -139,7 +139,9 @@ class FloatingIpTest(test.TestCase): def test_floating_ip_allocate(self): req = webob.Request.blank('/v1.1/os-floating-ips') req.method = 'POST' + req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) + print res self.assertEqual(res.status_int, 200) ip = json.loads(res.body)['allocated'] expected = { @@ -177,6 +179,7 @@ class FloatingIpTest(test.TestCase): def test_floating_ip_disassociate(self): req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') req.method = 'POST' + req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) ip = json.loads(res.body)['disassociated'] diff --git 
a/nova/tests/api/openstack/contrib/test_multinic_xs.py b/nova/tests/api/openstack/contrib/test_multinic_xs.py new file mode 100644 index 000000000..b0a9f7676 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_multinic_xs.py @@ -0,0 +1,115 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import stubout +import webob + +from nova import compute +from nova import context +from nova import test +from nova.tests.api.openstack import fakes + + +last_add_fixed_ip = (None, None) +last_remove_fixed_ip = (None, None) + + +def compute_api_add_fixed_ip(self, context, instance_id, network_id): + global last_add_fixed_ip + + last_add_fixed_ip = (instance_id, network_id) + + +def compute_api_remove_fixed_ip(self, context, instance_id, address): + global last_remove_fixed_ip + + last_remove_fixed_ip = (instance_id, address) + + +class FixedIpTest(test.TestCase): + def setUp(self): + super(FixedIpTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(compute.api.API, "add_fixed_ip", + compute_api_add_fixed_ip) + self.stubs.Set(compute.api.API, "remove_fixed_ip", + compute_api_remove_fixed_ip) + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + 
super(FixedIpTest, self).tearDown() + + def test_add_fixed_ip(self): + global last_add_fixed_ip + last_add_fixed_ip = (None, None) + + body = dict(addFixedIp=dict(networkId='test_net')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) + self.assertEqual(last_add_fixed_ip, ('test_inst', 'test_net')) + + def test_add_fixed_ip_no_network(self): + global last_add_fixed_ip + last_add_fixed_ip = (None, None) + + body = dict(addFixedIp=dict()) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + self.assertEqual(last_add_fixed_ip, (None, None)) + + def test_remove_fixed_ip(self): + global last_remove_fixed_ip + last_remove_fixed_ip = (None, None) + + body = dict(removeFixedIp=dict(address='10.10.10.1')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) + self.assertEqual(last_remove_fixed_ip, ('test_inst', '10.10.10.1')) + + def test_remove_fixed_ip_no_address(self): + global last_remove_fixed_ip + last_remove_fixed_ip = (None, None) + + body = dict(removeFixedIp=dict()) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + self.assertEqual(last_remove_fixed_ip, (None, None)) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 
eeb96cb12..26b1de818 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -147,6 +147,16 @@ def stub_out_compute_api_snapshot(stubs): stubs.Set(nova.compute.API, 'snapshot', snapshot) +def stub_out_compute_api_backup(stubs): + def backup(self, context, instance_id, name, backup_type, rotation, + extra_properties=None): + props = dict(instance_id=instance_id, instance_ref=instance_id, + backup_type=backup_type, rotation=rotation) + props.update(extra_properties or {}) + return dict(id='123', status='ACTIVE', name=name, properties=props) + stubs.Set(nova.compute.API, 'backup', backup) + + def stub_out_glance_add_image(stubs, sent_to_glance): """ We return the metadata sent to glance by modifying the sent_to_glance dict diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 29cb8b944..f09270b34 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -190,3 +190,78 @@ class PaginationParamsTest(test.TestCase): req = Request.blank('/?limit=20&marker=40') self.assertEqual(common.get_pagination_params(req), {'marker': 40, 'limit': 20}) + + +class MiscFunctionsTest(test.TestCase): + + def test_remove_version_from_href(self): + fixture = 'http://www.testsite.com/v1.1/images' + expected = 'http://www.testsite.com/images' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_2(self): + fixture = 'http://www.testsite.com/v1.1/' + expected = 'http://www.testsite.com/' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_3(self): + fixture = 'http://www.testsite.com/v10.10' + expected = 'http://www.testsite.com' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_4(self): + fixture = 'http://www.testsite.com/v1.1/images/v10.5' + 
expected = 'http://www.testsite.com/images/v10.5' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_bad_request(self): + fixture = 'http://www.testsite.com/1.1/images' + self.assertRaises(ValueError, + common.remove_version_from_href, + fixture) + + def test_remove_version_from_href_bad_request_2(self): + fixture = 'http://www.testsite.com/v/images' + self.assertRaises(ValueError, + common.remove_version_from_href, + fixture) + + def test_remove_version_from_href_bad_request_3(self): + fixture = 'http://www.testsite.com/v1.1images' + self.assertRaises(ValueError, + common.remove_version_from_href, + fixture) + + def test_get_id_from_href(self): + fixture = 'http://www.testsite.com/dir/45' + actual = common.get_id_from_href(fixture) + expected = 45 + self.assertEqual(actual, expected) + + def test_get_id_from_href_bad_request(self): + fixture = 'http://45' + self.assertRaises(ValueError, + common.get_id_from_href, + fixture) + + def test_get_version_from_href(self): + fixture = 'http://www.testsite.com/v1.1/images' + expected = '1.1' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_get_version_from_href_2(self): + fixture = 'http://www.testsite.com/v1.1' + expected = '1.1' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_get_version_from_href_default(self): + fixture = 'http://www.testsite.com/images' + expected = '1.0' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 697c62e5c..d459c694f 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -16,10 +16,11 @@ # under the License. 
import json +import os.path import stubout import unittest import webob -import os.path +from xml.etree import ElementTree from nova import context from nova import flags @@ -30,7 +31,8 @@ from nova.api.openstack import wsgi from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS - +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" response_body = "Try to say this Mr. Knox, sir..." @@ -80,20 +82,99 @@ class StubExtensionManager(object): class ExtensionControllerTest(unittest.TestCase): - def test_index(self): + def setUp(self): + FLAGS.osapi_extensions_path = os.path.join( + os.path.dirname(__file__), "extensions") + + def test_list_extensions_json(self): app = openstack.APIRouterV11() ext_midware = extensions.ExtensionMiddleware(app) request = webob.Request.blank("/extensions") response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) - def test_get_by_alias(self): + # Make sure we have all the extensions. + data = json.loads(response.body) + names = [x['name'] for x in data['extensions']] + names.sort() + self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", + "Fox In Socks", "Hosts", "Multinic", "Volumes"]) + + # Make sure that at least Fox in Sox is correct. 
+ (fox_ext,) = [ + x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] + self.assertEqual(fox_ext, { + 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', + 'name': 'Fox In Socks', + 'updated': '2011-01-22T13:25:27-06:00', + 'description': 'The Fox In Socks Extension', + 'alias': 'FOXNSOX', + 'links': [] + } + ) + + def test_get_extension_json(self): app = openstack.APIRouterV11() ext_midware = extensions.ExtensionMiddleware(app) request = webob.Request.blank("/extensions/FOXNSOX") response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) + data = json.loads(response.body) + self.assertEqual(data['extension'], { + "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", + "name": "Fox In Socks", + "updated": "2011-01-22T13:25:27-06:00", + "description": "The Fox In Socks Extension", + "alias": "FOXNSOX", + "links": [] + } + ) + + def test_list_extensions_xml(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/extensions") + request.accept = "application/xml" + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + print response.body + + root = ElementTree.XML(response.body) + self.assertEqual(root.tag.split('extensions')[0], NS) + + # Make sure we have all the extensions. + exts = root.findall('{0}extension'.format(NS)) + self.assertEqual(len(exts), 6) + + # Make sure that at least Fox in Sox is correct. 
+ (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] + self.assertEqual(fox_ext.get('name'), 'Fox In Socks') + self.assertEqual(fox_ext.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual(fox_ext.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + + def test_get_extension_xml(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/extensions/FOXNSOX") + request.accept = "application/xml" + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + print response.body + + root = ElementTree.XML(response.body) + self.assertEqual(root.tag.split('extension')[0], NS) + self.assertEqual(root.get('alias'), 'FOXNSOX') + self.assertEqual(root.get('name'), 'Fox In Socks') + self.assertEqual(root.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual(root.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + class ResourceExtensionTest(unittest.TestCase): @@ -192,7 +273,7 @@ class ActionExtensionTest(unittest.TestCase): def test_invalid_action(self): body = dict(blah=dict(name="test")) - response = self._send_server_action_request("/asdf/1/action", body) + response = self._send_server_action_request("/fdsa/1/action", body) self.assertEqual(404, response.status_int) @@ -244,3 +325,109 @@ class RequestExtensionTest(unittest.TestCase): response_data = json.loads(response.body) self.assertEqual('newblue', response_data['flavor']['googoose']) self.assertEqual("Pig Bands!", response_data['big_bands']) + + +class ExtensionsXMLSerializerTest(unittest.TestCase): + + def test_serialize_extenstion(self): + serializer = extensions.ExtensionsXMLSerializer() + data = { + 'extension': { + 'name': 'ext1', + 'namespace': 
'http://docs.rack.com/servers/api/ext/pie/v1.0', + 'alias': 'RS-PIE', + 'updated': '2011-01-22T13:25:27-06:00', + 'description': 'Adds the capability to share an image.', + 'links': [ + { + 'rel': 'describedby', + 'type': 'application/pdf', + 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf' + }, + { + 'rel': 'describedby', + 'type': 'application/vnd.sun.wadl+xml', + 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl' + } + ] + } + } + + xml = serializer.serialize(data, 'show') + root = ElementTree.XML(xml) + ext_dict = data['extension'] + self.assertEqual(root.findtext('{0}description'.format(NS)), + ext_dict['description']) + + for key in ['name', 'namespace', 'alias', 'updated']: + self.assertEqual(root.get(key), ext_dict[key]) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(ext_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + def test_serialize_extensions(self): + serializer = extensions.ExtensionsXMLSerializer() + data = { + "extensions": [ + { + "name": "Public Image Extension", + "namespace": "http://foo.com/api/ext/pie/v1.0", + "alias": "RS-PIE", + "updated": "2011-01-22T13:25:27-06:00", + "description": "Adds the capability to share an image.", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://foo.com/api/ext/cs-pie.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://foo.com/api/ext/cs-pie.wadl" + } + ] + }, + { + "name": "Cloud Block Storage", + "namespace": "http://foo.com/api/ext/cbs/v1.0", + "alias": "RS-CBS", + "updated": "2011-01-12T11:22:33-06:00", + "description": "Allows mounting cloud block storage.", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://foo.com/api/ext/cs-cbs.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": 
"http://foo.com/api/ext/cs-cbs.wadl" + } + ] + } + ] + } + + xml = serializer.serialize(data, 'index') + print xml + root = ElementTree.XML(xml) + ext_elems = root.findall('{0}extension'.format(NS)) + self.assertEqual(len(ext_elems), 2) + for i, ext_elem in enumerate(ext_elems): + ext_dict = data['extensions'][i] + self.assertEqual(ext_elem.findtext('{0}description'.format(NS)), + ext_dict['description']) + + for key in ['name', 'namespace', 'alias', 'updated']: + self.assertEqual(ext_elem.get(key), ext_dict[key]) + + link_nodes = ext_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(ext_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py index 4d86ffb26..6da27540a 100644 --- a/nova/tests/api/openstack/test_faults.py +++ b/nova/tests/api/openstack/test_faults.py @@ -16,6 +16,7 @@ # under the License. import json +from xml.dom import minidom import webob import webob.dec @@ -24,6 +25,7 @@ import webob.exc from nova import test from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi class TestFaults(test.TestCase): @@ -139,3 +141,113 @@ class TestFaults(test.TestCase): self.assertEqual(resp.content_type, "application/xml") self.assertEqual(resp.status_int, 404) self.assertTrue('whut?' 
in resp.body) + + def test_fault_has_status_int(self): + """Ensure the status_int is set correctly on faults""" + fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='what?')) + self.assertEqual(fault.status_int, 400) + + def test_v10_xml_serializer(self): + """Ensure that a v1.0 request responds with a v1.0 xmlns""" + request = webob.Request.blank('/', + headers={"Accept": "application/xml"}) + + fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram')) + response = request.get_response(fault) + + self.assertTrue(common.XML_NS_V10 in response.body) + self.assertEqual(response.content_type, "application/xml") + self.assertEqual(response.status_int, 400) + + def test_v11_xml_serializer(self): + """Ensure that a v1.1 request responds with a v1.1 xmlns""" + request = webob.Request.blank('/v1.1', + headers={"Accept": "application/xml"}) + + fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram')) + response = request.get_response(fault) + + self.assertTrue(common.XML_NS_V11 in response.body) + self.assertEqual(response.content_type, "application/xml") + self.assertEqual(response.status_int, 400) + + +class FaultsXMLSerializationTestV11(test.TestCase): + """Tests covering `nova.api.openstack.faults:Fault` class.""" + + def _prepare_xml(self, xml_string): + xml_string = xml_string.replace(" ", "") + xml_string = xml_string.replace("\n", "") + xml_string = xml_string.replace("\t", "") + return xml_string + + def test_400_fault(self): + metadata = {'attributes': {"badRequest": 'code'}} + serializer = wsgi.XMLDictSerializer(metadata=metadata, + xmlns=common.XML_NS_V11) + + fixture = { + "badRequest": { + "message": "scram", + "code": 400, + }, + } + + output = serializer.serialize(fixture) + actual = minidom.parseString(self._prepare_xml(output)) + + expected = minidom.parseString(self._prepare_xml(""" + <badRequest code="400" xmlns="%s"> + <message>scram</message> + </badRequest> + """) % common.XML_NS_V11) + + self.assertEqual(expected.toxml(), 
actual.toxml()) + + def test_413_fault(self): + metadata = {'attributes': {"overLimit": 'code'}} + serializer = wsgi.XMLDictSerializer(metadata=metadata, + xmlns=common.XML_NS_V11) + + fixture = { + "overLimit": { + "message": "sorry", + "code": 413, + "retryAfter": 4, + }, + } + + output = serializer.serialize(fixture) + actual = minidom.parseString(self._prepare_xml(output)) + + expected = minidom.parseString(self._prepare_xml(""" + <overLimit code="413" xmlns="%s"> + <message>sorry</message> + <retryAfter>4</retryAfter> + </overLimit> + """) % common.XML_NS_V11) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_404_fault(self): + metadata = {'attributes': {"itemNotFound": 'code'}} + serializer = wsgi.XMLDictSerializer(metadata=metadata, + xmlns=common.XML_NS_V11) + + fixture = { + "itemNotFound": { + "message": "sorry", + "code": 404, + }, + } + + output = serializer.serialize(fixture) + actual = minidom.parseString(self._prepare_xml(output)) + + expected = minidom.parseString(self._prepare_xml(""" + <itemNotFound code="404" xmlns="%s"> + <message>sorry</message> + </itemNotFound> + """) % common.XML_NS_V11) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index d1c62e454..4ac35b26b 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -18,12 +18,14 @@ import json import stubout import webob +import xml.dom.minidom as minidom +from nova.api.openstack import flavors import nova.db.api -from nova import context from nova import exception from nova import test from nova.tests.api.openstack import fakes +from nova import wsgi def stub_flavor(flavorid, name, memory_mb="256", local_gb="10"): @@ -64,7 +66,6 @@ class FlavorsTest(test.TestCase): return_instance_types) self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id", return_instance_type_by_flavor_id) - self.context = 
context.get_admin_context() def tearDown(self): self.stubs.UnsetAll() @@ -87,6 +88,19 @@ class FlavorsTest(test.TestCase): ] self.assertEqual(flavors, expected) + def test_get_empty_flavor_list_v1_0(self): + def _return_empty(self): + return {} + self.stubs.Set(nova.db.api, "instance_type_get_all", + _return_empty) + + req = webob.Request.blank('/v1.0/flavors') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavors = json.loads(res.body)["flavors"] + expected = [] + self.assertEqual(flavors, expected) + def test_get_flavor_list_detail_v1_0(self): req = webob.Request.blank('/v1.0/flavors/detail') res = req.get_response(fakes.wsgi_app()) @@ -133,79 +147,65 @@ class FlavorsTest(test.TestCase): req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - flavor = json.loads(res.body)["flavor"] + flavor = json.loads(res.body) expected = { - "id": "12", - "name": "flavor 12", - "ram": "256", - "disk": "10", - "links": [ - { - "rel": "self", - "href": "http://localhost/v1.1/flavors/12", - }, - { - "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/12", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/12", - }, - ], - } - self.assertEqual(flavor, expected) - - def test_get_flavor_list_v1_1(self): - req = webob.Request.blank('/v1.1/flavors') - req.environ['api.version'] = '1.1' - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - flavor = json.loads(res.body)["flavors"] - expected = [ - { - "id": "1", - "name": "flavor 1", - "links": [ - { - "rel": "self", - "href": "http://localhost/v1.1/flavors/1", - }, - { - "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/1", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/1", - }, - ], - }, - { - "id": "2", - "name": "flavor 2", + 
"flavor": { + "id": "12", + "name": "flavor 12", + "ram": "256", + "disk": "10", "links": [ { "rel": "self", - "href": "http://localhost/v1.1/flavors/2", - }, - { - "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/v1.1/flavors/12", }, { "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/flavors/12", }, ], }, - ] + } + self.assertEqual(flavor, expected) + + def test_get_flavor_list_v1_1(self): + req = webob.Request.blank('/v1.1/flavors') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavor = json.loads(res.body) + expected = { + "flavors": [ + { + "id": "1", + "name": "flavor 1", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/1", + }, + ], + }, + { + "id": "2", + "name": "flavor 2", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/2", + }, + ], + }, + ], + } self.assertEqual(flavor, expected) def test_get_flavor_list_detail_v1_1(self): @@ -213,51 +213,273 @@ class FlavorsTest(test.TestCase): req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) - flavor = json.loads(res.body)["flavors"] - expected = [ - { - "id": "1", - "name": "flavor 1", + flavor = json.loads(res.body) + expected = { + "flavors": [ + { + "id": "1", + "name": "flavor 1", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/1", + }, + ], + }, + { + "id": "2", + "name": "flavor 2", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": 
"bookmark", + "href": "http://localhost/flavors/2", + }, + ], + }, + ], + } + self.assertEqual(flavor, expected) + + def test_get_empty_flavor_list_v1_1(self): + def _return_empty(self): + return {} + self.stubs.Set(nova.db.api, "instance_type_get_all", _return_empty) + + req = webob.Request.blank('/v1.1/flavors') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavors = json.loads(res.body)["flavors"] + expected = [] + self.assertEqual(flavors, expected) + + +class FlavorsXMLSerializationTest(test.TestCase): + + def test_show(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavor": { + "id": "12", + "name": "asdf", "ram": "256", "disk": "10", "links": [ { "rel": "self", - "href": "http://localhost/v1.1/flavors/1", + "href": "http://localhost/v1.1/flavors/12", }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/1", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/1", + "href": "http://localhost/flavors/12", }, ], }, - { - "id": "2", - "name": "flavor 2", - "ram": "256", - "disk": "10", + } + + output = serializer.serialize(input, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavor xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + id="12" + name="asdf" + ram="256" + disk="10"> + <atom:link href="http://localhost/v1.1/flavors/12" rel="self"/> + <atom:link href="http://localhost/flavors/12" rel="bookmark"/> + </flavor> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_handles_integers(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavor": { + "id": 12, + "name": "asdf", + "ram": 256, + "disk": 10, "links": [ { "rel": "self", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/v1.1/flavors/12", }, { "rel": "bookmark", - 
"type": "application/json", - "href": "http://localhost/v1.1/flavors/2", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/flavors/12", }, ], }, - ] - self.assertEqual(flavor, expected) + } + + output = serializer.serialize(input, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavor xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + id="12" + name="asdf" + ram="256" + disk="10"> + <atom:link href="http://localhost/v1.1/flavors/12" rel="self"/> + <atom:link href="http://localhost/flavors/12" rel="bookmark"/> + </flavor> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_detail(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavors": [ + { + "id": "23", + "name": "flavor 23", + "ram": "512", + "disk": "20", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/23", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/23", + }, + ], + }, { + "id": "13", + "name": "flavor 13", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/13", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/13", + }, + ], + }, + ], + } + + output = serializer.serialize(input, 'detail') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom"> + <flavor id="23" + name="flavor 23" + ram="512" + disk="20"> + <atom:link href="http://localhost/v1.1/flavors/23" rel="self"/> + <atom:link href="http://localhost/flavors/23" rel="bookmark"/> + </flavor> + <flavor id="13" + name="flavor 13" + ram="256" + disk="10"> + <atom:link href="http://localhost/v1.1/flavors/13" rel="self"/> + <atom:link 
href="http://localhost/flavors/13" rel="bookmark"/> + </flavor> + </flavors> + """.replace(" ", "") % locals()) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavors": [ + { + "id": "23", + "name": "flavor 23", + "ram": "512", + "disk": "20", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/23", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/23", + }, + ], + }, { + "id": "13", + "name": "flavor 13", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/13", + }, + { + "rel": "bookmark", + "href": "http://localhost/flavors/13", + }, + ], + }, + ], + } + + output = serializer.serialize(input, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom"> + <flavor id="23" name="flavor 23"> + <atom:link href="http://localhost/v1.1/flavors/23" rel="self"/> + <atom:link href="http://localhost/flavors/23" rel="bookmark"/> + </flavor> + <flavor id="13" name="flavor 13"> + <atom:link href="http://localhost/v1.1/flavors/13" rel="self"/> + <atom:link href="http://localhost/flavors/13" rel="bookmark"/> + </flavor> + </flavors> + """.replace(" ", "") % locals()) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_empty(self): + serializer = flavors.FlavorXMLSerializer() + + input = { + "flavors": [], + } + + output = serializer.serialize(input, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <flavors xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" /> + """.replace(" ", "") % locals()) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_image_metadata.py 
b/nova/tests/api/openstack/test_image_metadata.py index 730af3665..31ca18497 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -24,6 +24,7 @@ import xml.dom.minidom as minidom from nova import flags from nova.api import openstack +from nova import test from nova.tests.api.openstack import fakes import nova.wsgi @@ -31,7 +32,7 @@ import nova.wsgi FLAGS = flags.FLAGS -class ImageMetaDataTest(unittest.TestCase): +class ImageMetaDataTest(test.TestCase): IMAGE_FIXTURES = [ {'status': 'active', @@ -102,8 +103,7 @@ class ImageMetaDataTest(unittest.TestCase): super(ImageMetaDataTest, self).tearDown() def test_index(self): - req = webob.Request.blank('/v1.1/images/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) @@ -112,33 +112,8 @@ class ImageMetaDataTest(unittest.TestCase): for (key, value) in res_dict['metadata'].items(): self.assertEqual(value, res_dict['metadata'][key]) - def test_index_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { - 'metadata': { - 'one': 'two', - 'three': 'four', - }, - } - output = serializer.index(fixture) - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="three"> - four - </meta> - <meta key="one"> - two - </meta> - </metadata> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) - def test_show(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) @@ -146,116 +121,75 @@ class ImageMetaDataTest(unittest.TestCase): 
self.assertEqual(len(res_dict['meta']), 1) self.assertEqual('value1', res_dict['meta']['key1']) - def test_show_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { - 'meta': { - 'one': 'two', - }, - } - output = serializer.show(fixture) - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> - two - </meta> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) - def test_show_not_found(self): - req = webob.Request.blank('/v1.1/images/1/meta/key9') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key9') res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) def test_create(self): - req = webob.Request.blank('/v1.1/images/2/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata') req.method = 'POST' req.body = '{"metadata": {"key9": "value9"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) - self.assertEqual('value9', res_dict['metadata']['key9']) - # other items should not be modified - self.assertEqual('value1', res_dict['metadata']['key1']) - self.assertEqual('value2', res_dict['metadata']['key2']) - self.assertEqual(1, len(res_dict)) + actual_output = json.loads(res.body) - def test_create_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { + expected_output = { 'metadata': { - 'key9': 'value9', - 'key2': 'value2', 'key1': 'value1', + 'key2': 'value2', + 'key9': 'value9', }, } - output = serializer.create(fixture) - actual = minidom.parseString(output.replace(" ", "")) - expected = minidom.parseString(""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="key2"> - value2 - </meta> - <meta key="key9"> - 
value9 - </meta> - <meta key="key1"> - value1 - </meta> - </metadata> - """.replace(" ", "")) + self.assertEqual(expected_output, actual_output) - self.assertEqual(expected.toxml(), actual.toxml()) + def test_update_all(self): + req = webob.Request.blank('/v1.1/images/2/metadata') + req.method = 'PUT' + req.body = '{"metadata": {"key9": "value9"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(200, res.status_int) + actual_output = json.loads(res.body) + + expected_output = { + 'metadata': { + 'key9': 'value9', + }, + } + + self.assertEqual(expected_output, actual_output) def test_update_item(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') req.method = 'PUT' req.body = '{"meta": {"key1": "zz"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) - res_dict = json.loads(res.body) - self.assertTrue('meta' in res_dict) - self.assertEqual(len(res_dict['meta']), 1) - self.assertEqual('zz', res_dict['meta']['key1']) + actual_output = json.loads(res.body) + expected_output = { + 'meta': { + 'key1': 'zz', + }, + } + self.assertEqual(actual_output, expected_output) def test_update_item_bad_body(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') req.method = 'PUT' req.body = '{"key1": "zz"}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) - def test_update_item_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { - 'meta': { - 'one': 'two', - }, - } - output = serializer.update(fixture) - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <meta 
xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> - two - </meta> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) - def test_update_item_too_many_keys(self): - req = webob.Request.blank('/v1.1/images/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/key1') req.method = 'PUT' req.body = '{"meta": {"key1": "value1", "key2": "value2"}}' req.headers["content-type"] = "application/json" @@ -263,24 +197,38 @@ class ImageMetaDataTest(unittest.TestCase): self.assertEqual(400, res.status_int) def test_update_item_body_uri_mismatch(self): - req = webob.Request.blank('/v1.1/images/1/meta/bad') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/1/metadata/bad') req.method = 'PUT' req.body = '{"meta": {"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) + def test_update_item_xml(self): + req = webob.Request.blank('/v1.1/images/1/metadata/key1') + req.method = 'PUT' + req.body = '<meta key="key1">five</meta>' + req.headers["content-type"] = "application/xml" + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(200, res.status_int) + actual_output = json.loads(res.body) + expected_output = { + 'meta': { + 'key1': 'five', + }, + } + self.assertEqual(actual_output, expected_output) + def test_delete(self): - req = webob.Request.blank('/v1.1/images/2/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata/key1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, res.status_int) + self.assertEqual(204, res.status_int) + self.assertEqual('', res.body) def test_delete_not_found(self): - req = webob.Request.blank('/v1.1/images/2/meta/blah') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata/blah') req.method = 'DELETE' res = 
req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) @@ -290,8 +238,7 @@ class ImageMetaDataTest(unittest.TestCase): for num in range(FLAGS.quota_metadata_items + 1): data['metadata']['key%i' % num] = "blah" json_string = str(data).replace("\'", "\"") - req = webob.Request.blank('/v1.1/images/2/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/2/metadata') req.method = 'POST' req.body = json_string req.headers["content-type"] = "application/json" @@ -299,10 +246,209 @@ class ImageMetaDataTest(unittest.TestCase): self.assertEqual(400, res.status_int) def test_too_many_metadata_items_on_put(self): - req = webob.Request.blank('/v1.1/images/3/meta/blah') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/images/3/metadata/blah') req.method = 'PUT' req.body = '{"meta": {"blah": "blah"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) + + +class ImageMetadataXMLDeserializationTest(test.TestCase): + + deserializer = openstack.image_metadata.ImageMetadataXMLDeserializer() + + def test_create(self): + request_body = """ + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key='123'>asdf</meta> + <meta key='567'>jkl;</meta> + </metadata>""" + output = self.deserializer.deserialize(request_body, 'create') + expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}} + self.assertEquals(output, expected) + + def test_create_empty(self): + request_body = """ + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>""" + output = self.deserializer.deserialize(request_body, 'create') + expected = {"body": {"metadata": {}}} + self.assertEquals(output, expected) + + def test_update_all(self): + request_body = """ + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key='123'>asdf</meta> + <meta key='567'>jkl;</meta> + </metadata>""" + output = 
self.deserializer.deserialize(request_body, 'update_all') + expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}} + self.assertEquals(output, expected) + + def test_update(self): + request_body = """ + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" + key='123'>asdf</meta>""" + output = self.deserializer.deserialize(request_body, 'update') + expected = {"body": {"meta": {"123": "asdf"}}} + self.assertEquals(output, expected) + + +class ImageMetadataXMLSerializationTest(test.TestCase): + + def test_index(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'one': 'two', + 'three': 'four', + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="three"> + four + </meta> + <meta key="one"> + two + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_null(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + None: None, + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="None"> + None + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_unicode(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + u'three': u'Jos\xe9', + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(u""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="three"> + Jos\xe9 + </meta> + </metadata> + 
""".encode("UTF-8").replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'meta': { + 'one': 'two', + }, + } + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> + two + </meta> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_update_all(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'key6': 'value6', + 'key4': 'value4', + }, + } + output = serializer.serialize(fixture, 'update_all') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key6"> + value6 + </meta> + <meta key="key4"> + value4 + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_update_item(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'meta': { + 'one': 'two', + }, + } + output = serializer.serialize(fixture, 'update') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> + two + </meta> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_create(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'key9': 'value9', + 'key2': 'value2', + 'key1': 'value1', + }, + } + output = serializer.serialize(fixture, 'create') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key2"> 
+ value2 + </meta> + <meta key="key9"> + value9 + </meta> + <meta key="key1"> + value1 + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_delete(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + output = serializer.serialize(None, 'delete') + self.assertEqual(output, '') diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index fc4fc84e2..87a695dde 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -340,6 +340,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.fixtures = self._make_image_fixtures() fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures) fakes.stub_out_compute_api_snapshot(self.stubs) + fakes.stub_out_compute_api_backup(self.stubs) def tearDown(self): """Run after each test.""" @@ -364,10 +365,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response_list = response_dict["images"] expected = [{'id': 123, 'name': 'public image'}, - {'id': 124, 'name': 'queued backup'}, - {'id': 125, 'name': 'saving backup'}, - {'id': 126, 'name': 'active backup'}, - {'id': 127, 'name': 'killed backup'}, + {'id': 124, 'name': 'queued snapshot'}, + {'id': 125, 'name': 'saving snapshot'}, + {'id': 126, 'name': 'active snapshot'}, + {'id': 127, 'name': 'killed snapshot'}, {'id': 129, 'name': None}] self.assertDictListMatch(response_list, expected) @@ -393,33 +394,45 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image, actual_image) def test_get_image_v1_1(self): - request = webob.Request.blank('/v1.1/images/123') + request = webob.Request.blank('/v1.1/images/124') response = request.get_response(fakes.wsgi_app()) actual_image = json.loads(response.body) - href = "http://localhost/v1.1/images/123" + href = "http://localhost/v1.1/images/124" + bookmark = "http://localhost/images/124" + server_href = 
"http://localhost/v1.1/servers/42" + server_bookmark = "http://localhost/servers/42" expected_image = { "image": { - "id": 123, - "name": "public image", + "id": 124, + "name": "queued snapshot", "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, - "status": "ACTIVE", + "status": "QUEUED", + 'server': { + 'id': 42, + "links": [{ + "rel": "self", + "href": server_href, + }, + { + "rel": "bookmark", + "href": server_bookmark, + }], + }, + "metadata": { + "instance_ref": "http://localhost/v1.1/servers/42", + "user_id": "1", + }, "links": [{ "rel": "self", "href": href, }, { "rel": "bookmark", - "type": "application/json", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": href, + "href": bookmark, }], }, } @@ -464,34 +477,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image.toxml(), actual_image.toxml()) - def test_get_image_v1_1_xml(self): - request = webob.Request.blank('/v1.1/images/123') - request.accept = "application/xml" - response = request.get_response(fakes.wsgi_app()) - - actual_image = minidom.parseString(response.body.replace(" ", "")) - - expected_href = "http://localhost/v1.1/images/123" - expected_now = self.NOW_API_FORMAT - expected_image = minidom.parseString(""" - <image id="123" - name="public image" - updated="%(expected_now)s" - created="%(expected_now)s" - status="ACTIVE" - xmlns="http://docs.openstack.org/compute/api/v1.1"> - <links> - <link href="%(expected_href)s" rel="self"/> - <link href="%(expected_href)s" rel="bookmark" - type="application/json" /> - <link href="%(expected_href)s" rel="bookmark" - type="application/xml" /> - </links> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected_image.toxml(), actual_image.toxml()) - def test_get_image_404_json(self): request = webob.Request.blank('/v1.0/images/NonExistantImage') response = request.get_response(fakes.wsgi_app()) @@ -553,7 +538,7 @@ class 
ImageControllerWithGlanceServiceTest(test.TestCase): # because the element hasn't changed definition expected = minidom.parseString(""" <itemNotFound code="404" - xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + xmlns="http://docs.openstack.org/compute/api/v1.1"> <message> Image not found. </message> @@ -579,23 +564,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): continue href = "http://localhost/v1.1/images/%s" % image["id"] + bookmark = "http://localhost/images/%s" % image["id"] test_image = { "id": image["id"], "name": image["name"], - "links": [{ - "rel": "self", - "href": "http://localhost/v1.1/images/%s" % image["id"], - }, - { - "rel": "bookmark", - "type": "application/json", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": href, - }], + "links": [ + { + "rel": "self", + "href": href, + }, + { + "rel": "bookmark", + "href": bookmark, + }, + ], } self.assertTrue(test_image in response_list) @@ -617,14 +599,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { 'id': 124, - 'name': 'queued backup', + 'name': 'queued snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', }, { 'id': 125, - 'name': 'saving backup', + 'name': 'saving snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'SAVING', @@ -632,14 +614,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { 'id': 126, - 'name': 'active backup', + 'name': 'active snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE' }, { 'id': 127, - 'name': 'killed backup', + 'name': 'killed snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', @@ -660,10 +642,13 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response_dict = json.loads(response.body) response_list = response_dict["images"] + server_href = "http://localhost/v1.1/servers/42" + server_bookmark = 
"http://localhost/servers/42" expected = [{ 'id': 123, 'name': 'public image', + 'metadata': {}, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -673,107 +658,134 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/123", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/123", + "href": "http://localhost/images/123", }], }, { 'id': 124, - 'name': 'queued backup', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'name': 'queued snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', + 'server': { + 'id': 42, + "links": [{ + "rel": "self", + "href": server_href, + }, + { + "rel": "bookmark", + "href": server_bookmark, + }], + }, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/124", }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/124", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/124", + "href": "http://localhost/images/124", }], }, { 'id': 125, - 'name': 'saving backup', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'name': 'saving snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'SAVING', 'progress': 0, + 'server': { + 'id': 42, + "links": [{ + "rel": "self", + "href": server_href, + }, + { + "rel": "bookmark", + "href": server_bookmark, + }], + }, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/125", }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/125", - }, - { - "rel": "bookmark", - "type": "application/xml", - 
"href": "http://localhost/v1.1/images/125", + "href": "http://localhost/images/125", }], }, { 'id': 126, - 'name': 'active backup', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'name': 'active snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'server': { + 'id': 42, + "links": [{ + "rel": "self", + "href": server_href, + }, + { + "rel": "bookmark", + "href": server_bookmark, + }], + }, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/126", }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/126", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/126", + "href": "http://localhost/images/126", }], }, { 'id': 127, - 'name': 'killed backup', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'name': 'killed snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', + 'server': { + 'id': 42, + "links": [{ + "rel": "self", + "href": server_href, + }, + { + "rel": "bookmark", + "href": server_bookmark, + }], + }, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/127", }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/127", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/127", + "href": "http://localhost/images/127", }], }, { 'id': 129, 'name': None, + 'metadata': {}, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -783,13 +795,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/129", - }, - { - "rel": "bookmark", - "type": 
"application/xml", - "href": "http://localhost/v1.1/images/129", + "href": "http://localhost/images/129", }], }, ] @@ -797,154 +803,206 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) def test_image_filter_with_name(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'name': 'testname'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?name=testname') + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?name=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_filter_with_status(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?status=ACTIVE') + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?status=ACTIVE') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_filter_with_property(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'property-test': '3'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?property-test=3') + image_service.index(context, 
filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?property-test=3') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_filter_server(self): + image_service = self.mox.CreateMockAnything() + context = object() + # 'server' should be converted to 'property-instance_ref' + filters = {'property-instance_ref': 'http://localhost:8774/servers/12'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?server=' + 'http://localhost:8774/servers/12') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_filter_changes_since(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'changes-since': '2011-01-24T17:08Z'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?changes-since=' + '2011-01-24T17:08Z') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_filter_with_type(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'property-image_type': 'BASE'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?type=BASE') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_filter_not_supported(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = 
object() filters = {'status': 'ACTIVE'} - image_service.index( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images?status=ACTIVE&' + 'UNSUPPORTEDFILTER=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) - controller.index(request) - mocker.VerifyAll() + controller.detail(request) + self.mox.VerifyAll() def test_image_no_filters(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {} image_service.index( context, filters=filters).AndReturn([]) - mocker.ReplayAll() + self.mox.ReplayAll() request = webob.Request.blank( '/v1.1/images') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.index(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_filter_with_name(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'name': 'testname'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?name=testname') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?name=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_filter_with_status(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() 
filters = {'status': 'ACTIVE'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?status=ACTIVE') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?status=ACTIVE') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_filter_with_property(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'property-test': '3'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?property-test=3') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?property-test=3') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() + + def test_image_detail_filter_server(self): + image_service = self.mox.CreateMockAnything() + context = object() + # 'server' should be converted to 'property-instance_ref' + filters = {'property-instance_ref': 'http://localhost:8774/servers/12'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?server=' + 'http://localhost:8774/servers/12') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_detail_filter_changes_since(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'changes-since': 
'2011-01-24T17:08Z'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?changes-since=' + '2011-01-24T17:08Z') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() + + def test_image_detail_filter_with_type(self): + image_service = self.mox.CreateMockAnything() + context = object() + filters = {'property-image_type': 'BASE'} + image_service.index(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?type=BASE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + self.mox.VerifyAll() def test_image_detail_filter_not_supported(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {'status': 'ACTIVE'} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') + image_service.detail(context, filters=filters).AndReturn([]) + self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail?status=ACTIVE&' + 'UNSUPPORTEDFILTER=testname') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_image_detail_no_filters(self): - mocker = mox.Mox() - image_service = mocker.CreateMockAnything() + image_service = self.mox.CreateMockAnything() context = object() filters = {} - image_service.detail( - context, filters=filters).AndReturn([]) - mocker.ReplayAll() - request = webob.Request.blank( - '/v1.1/images/detail') + image_service.detail(context, filters=filters).AndReturn([]) + 
self.mox.ReplayAll() + request = webob.Request.blank('/v1.1/images/detail') request.environ['nova.context'] = context controller = images.ControllerV11(image_service=image_service) controller.detail(request) - mocker.VerifyAll() + self.mox.VerifyAll() def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') @@ -969,8 +1027,48 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(res.status_int, 404) def test_create_image(self): + body = dict(image=dict(serverId='123', name='Snapshot 1')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + + def test_create_snapshot_no_name(self): + """Name is required for snapshots""" + body = dict(image=dict(serverId='123')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_name(self): + """Name is also required for backups""" + body = dict(image=dict(serverId='123', image_type='backup', + backup_type='daily', rotation=1)) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_with_rotation_and_backup_type(self): + """The happy path for creating backups + + Creating a backup is an admin-only operation, as opposed to snapshots + which are available to anybody. + """ + # FIXME(sirp): teardown needed? 
+ FLAGS.allow_admin_api = True - body = dict(image=dict(serverId='123', name='Backup 1')) + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', image_type='backup', + name='Backup 1', + backup_type='daily', rotation=1)) req = webob.Request.blank('/v1.0/images') req.method = 'POST' req.body = json.dumps(body) @@ -978,9 +1076,54 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) + def test_create_backup_no_rotation(self): + """Rotation is required for backup requests""" + # FIXME(sirp): teardown needed? + FLAGS.allow_admin_api = True + + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', name='daily', + image_type='backup', backup_type='daily')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_backup_type(self): + """Backup Type (daily or weekly) is required for backup requests""" + # FIXME(sirp): teardown needed? + FLAGS.allow_admin_api = True + + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', name='daily', + image_type='backup', rotation=1)) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_image_with_invalid_image_type(self): + """Valid image_types are snapshot | daily | weekly""" + # FIXME(sirp): teardown needed? 
+ FLAGS.allow_admin_api = True + + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', image_type='monthly', + rotation=1)) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + def test_create_image_no_server_id(self): - body = dict(image=dict(name='Backup 1')) + body = dict(image=dict(name='Snapshot 1')) req = webob.Request.blank('/v1.0/images') req.method = 'POST' req.body = json.dumps(body) @@ -990,7 +1133,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def test_create_image_v1_1(self): - body = dict(image=dict(serverRef='123', name='Backup 1')) + body = dict(image=dict(serverRef='123', name='Snapshot 1')) req = webob.Request.blank('/v1.1/images') req.method = 'POST' req.body = json.dumps(body) @@ -1001,6 +1144,34 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def test_create_image_v1_1_actual_server_ref(self): serverRef = 'http://localhost/v1.1/servers/1' + serverBookmark = 'http://localhost/servers/1' + body = dict(image=dict(serverRef=serverRef, name='Backup 1')) + req = webob.Request.blank('/v1.1/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + result = json.loads(response.body) + expected = { + 'id': 1, + 'links': [ + { + 'rel': 'self', + 'href': serverRef, + }, + { + 'rel': 'bookmark', + 'href': serverBookmark, + }, + ] + } + self.assertEqual(result['image']['server'], expected) + + def test_create_image_v1_1_actual_server_ref_port(self): + + serverRef = 'http://localhost:8774/v1.1/servers/1' + serverBookmark = 'http://localhost:8774/servers/1' body = dict(image=dict(serverRef=serverRef, name='Backup 1')) req = 
webob.Request.blank('/v1.1/images') req.method = 'POST' @@ -1009,7 +1180,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) result = json.loads(response.body) - self.assertEqual(result['image']['serverRef'], serverRef) + expected = { + 'id': 1, + 'links': [ + { + 'rel': 'self', + 'href': serverRef, + }, + { + 'rel': 'bookmark', + 'href': serverBookmark, + }, + ] + } + self.assertEqual(result['image']['server'], expected) def test_create_image_v1_1_server_ref_bad_hostname(self): @@ -1022,42 +1206,31 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(400, response.status_int) - def test_create_image_v1_1_xml_serialization(self): + def test_create_image_v1_1_no_server_ref(self): - body = dict(image=dict(serverRef='123', name='Backup 1')) + body = dict(image=dict(name='Snapshot 1')) req = webob.Request.blank('/v1.1/images') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" - req.headers["accept"] = "application/xml" response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - resp_xml = minidom.parseString(response.body.replace(" ", "")) - expected_href = "http://localhost/v1.1/images/123" - expected_image = minidom.parseString(""" - <image - created="None" - id="123" - name="Backup 1" - serverRef="http://localhost/v1.1/servers/123" - status="ACTIVE" - updated="None" - xmlns="http://docs.openstack.org/compute/api/v1.1"> - <links> - <link href="%(expected_href)s" rel="self"/> - <link href="%(expected_href)s" rel="bookmark" - type="application/json" /> - <link href="%(expected_href)s" rel="bookmark" - type="application/xml" /> - </links> - </image> - """.replace(" ", "") % (locals())) + self.assertEqual(400, response.status_int) - self.assertEqual(expected_image.toxml(), resp_xml.toxml()) + def 
test_create_image_v1_1_server_ref_missing_version(self): - def test_create_image_v1_1_no_server_ref(self): + serverRef = 'http://localhost/servers/1' + body = dict(image=dict(serverRef=serverRef, name='Backup 1')) + req = webob.Request.blank('/v1.1/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_image_v1_1_server_ref_missing_id(self): - body = dict(image=dict(name='Backup 1')) + serverRef = 'http://localhost/v1.1/servers' + body = dict(image=dict(serverRef=serverRef, name='Backup 1')) req = webob.Request.blank('/v1.1/images') req.method = 'POST' req.body = json.dumps(body) @@ -1084,19 +1257,21 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): status='active', properties={}) image_id += 1 - # Backup for User 1 - server_ref = 'http://localhost:8774/v1.1/servers/42' - backup_properties = {'instance_ref': server_ref, 'user_id': '1'} + # Snapshot for User 1 + server_ref = 'http://localhost/v1.1/servers/42' + snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'} for status in ('queued', 'saving', 'active', 'killed'): - add_fixture(id=image_id, name='%s backup' % status, + add_fixture(id=image_id, name='%s snapshot' % status, is_public=False, status=status, - properties=backup_properties) + properties=snapshot_properties) image_id += 1 - # Backup for User 2 - other_backup_properties = {'instance_id': '43', 'user_id': '2'} - add_fixture(id=image_id, name='someone elses backup', is_public=False, - status='active', properties=other_backup_properties) + # Snapshot for User 2 + other_snapshot_properties = {'instance_id': '43', 'user_id': '2'} + add_fixture(id=image_id, name='someone elses snapshot', + is_public=False, status='active', + properties=other_snapshot_properties) + image_id += 1 # Image without a name @@ -1105,3 +1280,512 @@ class 
ImageControllerWithGlanceServiceTest(test.TestCase): image_id += 1 return fixtures + + +class ImageXMLSerializationTest(test.TestCase): + + TIMESTAMP = "2010-10-11T10:30:22Z" + SERVER_HREF = 'http://localhost/v1.1/servers/123' + SERVER_BOOKMARK = 'http://localhost/servers/123' + IMAGE_HREF = 'http://localhost/v1.1/images/%s' + IMAGE_BOOKMARK = 'http://localhost/images/%s' + + def test_show(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'ACTIVE', + 'progress': 80, + 'server': { + 'id': 1, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + }, + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_href = self.IMAGE_HREF % 1 + expected_bookmark = self.IMAGE_BOOKMARK % 1 + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + name="Image1" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + progress="80"> + <server id="1"> + <atom:link rel="self" href="%(expected_server_href)s"/> + <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> + </server> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + <atom:link href="%(expected_href)s" rel="self"/> + <atom:link href="%(expected_bookmark)s" rel="bookmark"/> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def 
test_show_zero_metadata(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'ACTIVE', + 'server': { + 'id': 1, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + }, + 'metadata': {}, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_href = self.IMAGE_HREF % 1 + expected_bookmark = self.IMAGE_BOOKMARK % 1 + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + name="Image1" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <server id="1"> + <atom:link rel="self" href="%(expected_server_href)s"/> + <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> + </server> + <atom:link href="%(expected_href)s" rel="self"/> + <atom:link href="%(expected_bookmark)s" rel="bookmark"/> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_image_no_metadata_key(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'ACTIVE', + 'server': { + 'id': 1, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, 
+ ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_href = self.IMAGE_HREF % 1 + expected_bookmark = self.IMAGE_BOOKMARK % 1 + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + name="Image1" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <server id="1"> + <atom:link rel="self" href="%(expected_server_href)s"/> + <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> + </server> + <atom:link href="%(expected_href)s" rel="self"/> + <atom:link href="%(expected_bookmark)s" rel="bookmark"/> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_no_server(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'ACTIVE', + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_href = self.IMAGE_HREF % 1 + expected_bookmark = self.IMAGE_BOOKMARK % 1 + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + name="Image1" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + <atom:link href="%(expected_href)s" rel="self"/> + <atom:link href="%(expected_bookmark)s" rel="bookmark"/> + 
</image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'images': [ + { + 'id': 1, + 'name': 'Image1', + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + ], + }, + { + 'id': 2, + 'name': 'Image2', + 'links': [ + { + 'href': self.IMAGE_HREF % 2, + 'rel': 'self', + }, + ], + }, + ] + } + + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_href = self.IMAGE_HREF % 1 + expected_bookmark = self.IMAGE_BOOKMARK % 1 + expected_href_two = self.IMAGE_HREF % 2 + expected_bookmark_two = self.IMAGE_BOOKMARK % 2 + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom"> + <image id="1" name="Image1"> + <atom:link href="%(expected_href)s" rel="self"/> + </image> + <image id="2" name="Image2"> + <atom:link href="%(expected_href_two)s" rel="self"/> + </image> + </images> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_zero_images(self): + serializer = images.ImageXMLSerializer() + + fixtures = { + 'images': [], + } + + output = serializer.serialize(fixtures, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <images + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" /> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_detail(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'images': [ + { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'ACTIVE', + 'server': { + 'id': 1, + 
'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, + ], + }, + { + 'id': 2, + 'name': 'Image2', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'SAVING', + 'progress': 80, + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 2, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 2, + 'rel': 'bookmark', + }, + ], + }, + ] + } + + output = serializer.serialize(fixture, 'detail') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_href = self.IMAGE_HREF % 1 + expected_bookmark = self.IMAGE_BOOKMARK % 1 + expected_href_two = self.IMAGE_HREF % 2 + expected_bookmark_two = self.IMAGE_BOOKMARK % 2 + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom"> + <image id="1" + name="Image1" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <server id="1"> + <atom:link rel="self" href="%(expected_server_href)s"/> + <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> + </server> + <atom:link href="%(expected_href)s" rel="self"/> + <atom:link href="%(expected_bookmark)s" rel="bookmark"/> + </image> + <image id="2" + name="Image2" + updated="%(expected_now)s" + created="%(expected_now)s" + status="SAVING" + progress="80"> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + <atom:link href="%(expected_href_two)s" rel="self"/> + <atom:link href="%(expected_bookmark_two)s" rel="bookmark"/> + </image> + </images> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_create(self): 
+ serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'SAVING', + 'progress': 80, + 'server': { + 'id': 1, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + }, + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'create') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_server_bookmark = self.SERVER_BOOKMARK + expected_href = self.IMAGE_HREF % 1 + expected_bookmark = self.IMAGE_BOOKMARK % 1 + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + xmlns="http://docs.openstack.org/compute/api/v1.1" + xmlns:atom="http://www.w3.org/2005/Atom" + name="Image1" + updated="%(expected_now)s" + created="%(expected_now)s" + status="SAVING" + progress="80"> + <server id="1"> + <atom:link rel="self" href="%(expected_server_href)s"/> + <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> + </server> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + <atom:link href="%(expected_href)s" rel="self"/> + <atom:link href="%(expected_bookmark)s" rel="bookmark"/> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 38c959fae..8a3fe681a 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -24,11 +24,12 @@ import stubout import time import unittest import webob - -from xml.dom.minidom import parseString +from xml.dom import minidom import nova.context from nova.api.openstack import limits +from 
nova.api.openstack import views +from nova import test TEST_LIMITS = [ @@ -166,7 +167,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): request = self._get_index_request("application/xml") response = request.get_response(self.controller) - expected = parseString(""" + expected = minidom.parseString(""" <limits xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> <rate/> @@ -174,7 +175,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): </limits> """.replace(" ", "")) - body = parseString(response.body.replace(" ", "")) + body = minidom.parseString(response.body.replace(" ", "")) self.assertEqual(expected.toxml(), body.toxml()) @@ -184,7 +185,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): request = self._populate_limits(request) response = request.get_response(self.controller) - expected = parseString(""" + expected = minidom.parseString(""" <limits xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> <rate> @@ -196,7 +197,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): <absolute/> </limits> """.replace(" ", "")) - body = parseString(response.body.replace(" ", "")) + body = minidom.parseString(response.body.replace(" ", "")) self.assertEqual(expected.toxml(), body.toxml()) @@ -210,6 +211,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): """Run before each test.""" BaseLimitTestSuite.setUp(self) self.controller = limits.create_resource('1.1') + self.maxDiff = None def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -266,14 +268,14 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, { "verb": "POST", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "HOUR", "value": 5, "remaining": 5, @@ -286,7 +288,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, 
+ "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 5, "remaining": 5, @@ -328,7 +330,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, @@ -341,7 +343,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): "limit": [ { "verb": "GET", - "next-available": 0, + "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, @@ -400,6 +402,10 @@ class LimitsControllerV11Test(BaseLimitTestSuite): self._test_index_absolute_limits_json(expected) +class TestLimiter(limits.Limiter): + pass + + class LimitMiddlewareTest(BaseLimitTestSuite): """ Tests for the `limits.RateLimitingMiddleware` class. @@ -413,10 +419,14 @@ class LimitMiddlewareTest(BaseLimitTestSuite): def setUp(self): """Prepare middleware for use through fake WSGI app.""" BaseLimitTestSuite.setUp(self) - _limits = [ - limits.Limit("GET", "*", ".*", 1, 60), - ] - self.app = limits.RateLimitingMiddleware(self._empty_app, _limits) + _limits = '(GET, *, .*, 1, MINUTE)' + self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, + "%s.TestLimiter" % + self.__class__.__module__) + + def test_limit_class(self): + """Test that middleware selected correct limiter class.""" + assert isinstance(self.app._limiter, TestLimiter) def test_good_request(self): """Test successful GET request through middleware.""" @@ -450,7 +460,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite): response = request.get_response(self.app) self.assertEqual(response.status_int, 403) - root = parseString(response.body).childNodes[0] + root = minidom.parseString(response.body).childNodes[0] expected = "Only 1 GET request(s) can be made to * every minute." 
details = root.getElementsByTagName("details") @@ -492,6 +502,72 @@ class LimitTest(BaseLimitTestSuite): self.assertEqual(4, limit.last_request) +class ParseLimitsTest(BaseLimitTestSuite): + """ + Tests for the default limits parser in the in-memory + `limits.Limiter` class. + """ + + def test_invalid(self): + """Test that parse_limits() handles invalid input correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + ';;;;;') + + def test_bad_rule(self): + """Test that parse_limits() handles bad rules correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + 'GET, *, .*, 20, minute') + + def test_missing_arg(self): + """Test that parse_limits() handles missing args correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20)') + + def test_bad_value(self): + """Test that parse_limits() handles bad values correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, foo, minute)') + + def test_bad_unit(self): + """Test that parse_limits() handles bad units correctly.""" + self.assertRaises(ValueError, limits.Limiter.parse_limits, + '(GET, *, .*, 20, lightyears)') + + def test_multiple_rules(self): + """Test that parse_limits() handles multiple rules correctly.""" + try: + l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' + '(PUT, /foo*, /foo.*, 10, hour);' + '(POST, /bar*, /bar.*, 5, second);' + '(Say, /derp*, /derp.*, 1, day)') + except ValueError, e: + assert False, str(e) + + # Make sure the number of returned limits are correct + self.assertEqual(len(l), 4) + + # Check all the verbs... + expected = ['GET', 'PUT', 'POST', 'SAY'] + self.assertEqual([t.verb for t in l], expected) + + # ...the URIs... + expected = ['*', '/foo*', '/bar*', '/derp*'] + self.assertEqual([t.uri for t in l], expected) + + # ...the regexes... + expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] + self.assertEqual([t.regex for t in l], expected) + + # ...the values... 
+ expected = [20, 10, 5, 1] + self.assertEqual([t.value for t in l], expected) + + # ...and the units... + expected = [limits.PER_MINUTE, limits.PER_HOUR, + limits.PER_SECOND, limits.PER_DAY] + self.assertEqual([t.unit for t in l], expected) + + class LimiterTest(BaseLimitTestSuite): """ Tests for the in-memory `limits.Limiter` class. @@ -500,7 +576,8 @@ class LimiterTest(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.limiter = limits.Limiter(TEST_LIMITS) + userlimits = {'user:user3': ''} + self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) def _check(self, num, verb, url, username=None): """Check and yield results from checks.""" @@ -605,6 +682,12 @@ class LimiterTest(BaseLimitTestSuite): results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) + def test_user_limit(self): + """ + Test user-specific limits. + """ + self.assertEqual(self.limiter.levels['user3'], []) + def test_multiple_users(self): """ Tests involving multiple users. 
@@ -619,6 +702,11 @@ class LimiterTest(BaseLimitTestSuite): results = list(self._check(15, "PUT", "/anything", "user2")) self.assertEqual(expected, results) + # User3 + expected = [None] * 20 + results = list(self._check(20, "PUT", "/anything", "user3")) + self.assertEqual(expected, results) + self.time += 1.0 # User1 again @@ -818,3 +906,195 @@ class WsgiLimiterProxyTest(BaseLimitTestSuite): "made to /delayed every minute.") self.assertEqual((delay, error), expected) + + +class LimitsViewBuilderV11Test(test.TestCase): + + def setUp(self): + self.view_builder = views.limits.ViewBuilderV11() + self.rate_limits = [ + { + "URI": "*", + "regex": ".*", + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "resetTime": 1311272226 + }, + { + "URI": "*/servers", + "regex": "^/servers", + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "resetTime": 1311272226 + }, + ] + self.absolute_limits = { + "metadata_items": 1, + "injected_files": 5, + "injected_file_content_bytes": 5, + } + + def tearDown(self): + pass + + def test_build_limits(self): + expected_limits = { + "limits": { + "rate": [ + { + "uri": "*", + "regex": ".*", + "limit": [ + { + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-07-21T18:17:06Z" + }, + ] + }, + { + "uri": "*/servers", + "regex": "^/servers", + "limit": [ + { + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-07-21T18:17:06Z" + }, + ] + }, + ], + "absolute": { + "maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5 + } + } + } + + output = self.view_builder.build(self.rate_limits, + self.absolute_limits) + self.assertDictMatch(output, expected_limits) + + def test_build_limits_empty_limits(self): + expected_limits = { + "limits": { + "rate": [], + "absolute": {} + } + } + + abs_limits = {} + rate_limits = [] + output = self.view_builder.build(rate_limits, abs_limits) + 
self.assertDictMatch(output, expected_limits) + + +class LimitsXMLSerializationTest(test.TestCase): + + def setUp(self): + self.maxDiff = None + + def tearDown(self): + pass + + def test_index(self): + serializer = limits.LimitsXMLSerializer() + + fixture = { + "limits": { + "rate": [ + { + "uri": "*", + "regex": ".*", + "limit": [ + { + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z" + }, + ] + }, + { + "uri": "*/servers", + "regex": "^/servers", + "limit": [ + { + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-12-15T22:42:45Z" + }, + ] + }, + ], + "absolute": { + "maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 10240 + } + } + } + + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <limits xmlns="http://docs.openstack.org/compute/api/v1.1"> + <rates> + <rate uri="*" regex=".*"> + <limit value="10" verb="POST" remaining="2" + unit="MINUTE" + next-available="2011-12-15T22:42:45Z"/> + </rate> + <rate uri="*/servers" regex="^/servers"> + <limit value="50" verb="POST" remaining="10" + unit="DAY" + next-available="2011-12-15T22:42:45Z"/> + </rate> + </rates> + <absolute> + <limit name="maxServerMeta" value="1"/> + <limit name="maxPersonality" value="5"/> + <limit name="maxImageMeta" value="1"/> + <limit name="maxPersonalitySize" value="10240"/> + </absolute> + </limits> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_no_limits(self): + serializer = limits.LimitsXMLSerializer() + + fixture = { + "limits": { + "rate": [], + "absolute": {} + } + } + + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <limits xmlns="http://docs.openstack.org/compute/api/v1.1"> + <rates /> + <absolute /> + 
</limits> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b53c6c9be..56bc2cf10 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -30,8 +30,9 @@ from nova import flags from nova import test from nova import utils import nova.api.openstack -from nova.api.openstack import servers from nova.api.openstack import create_instance_helper +from nova.api.openstack import servers +from nova.api.openstack import wsgi import nova.compute.api from nova.compute import instance_types from nova.compute import power_state @@ -65,6 +66,18 @@ def return_server_by_uuid(context, uuid): return stub_instance(id, uuid=uuid) +def return_virtual_interface_by_instance(interfaces): + def _return_virtual_interface_by_instance(context, instance_id): + return interfaces + return _return_virtual_interface_by_instance + + +def return_virtual_interface_instance_nonexistant(interfaces): + def _return_virtual_interface_by_instance(context, instance_id): + raise exception.InstanceNotFound(instance_id=instance_id) + return _return_virtual_interface_by_instance + + def return_server_with_addresses(private, public): def _return_server(context, id): return stub_instance(id, private_address=private, @@ -72,6 +85,12 @@ def return_server_with_addresses(private, public): return _return_server +def return_server_with_interfaces(interfaces): + def _return_server(context, id): + return stub_instance(id, interfaces=interfaces) + return _return_server + + def return_server_with_power_state(power_state): def _return_server(context, id): return stub_instance(id, power_state=power_state) @@ -118,16 +137,19 @@ def instance_update(context, instance_id, kwargs): return stub_instance(instance_id) -def instance_address(context, instance_id): +def instance_addresses(context, instance_id): return None def stub_instance(id, user_id=1, 
private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", - uuid=FAKE_UUID): + uuid=FAKE_UUID, interfaces=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) + if interfaces is None: + interfaces = [] + inst_type = instance_types.get_instance_type_by_flavor_id(1) if public_addresses is None: @@ -171,9 +193,10 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "display_description": "", "locked": False, "metadata": metadata, - "uuid": uuid} + "uuid": uuid, + "virtual_interfaces": interfaces} - instance["fixed_ip"] = { + instance["fixed_ips"] = { "address": private_address, "floating_ips": [{"address":ip} for ip in public_addresses]} @@ -220,10 +243,10 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_add_security_group', return_security_group) self.stubs.Set(nova.db.api, 'instance_update', instance_update) - self.stubs.Set(nova.db.api, 'instance_get_fixed_address', - instance_address) + self.stubs.Set(nova.db.api, 'instance_get_fixed_addresses', + instance_addresses) self.stubs.Set(nova.db.api, 'instance_get_floating_address', - instance_address) + instance_addresses) self.stubs.Set(nova.compute.API, 'pause', fake_compute_api) self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api) self.stubs.Set(nova.compute.API, 'suspend', fake_compute_api) @@ -290,13 +313,7 @@ class ServersTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/servers/1", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/servers/1", + "href": "http://localhost/servers/1", }, ] @@ -417,22 +434,203 @@ class ServersTest(test.TestCase): self.assertEquals(ip.getAttribute('addr'), private) def test_get_server_by_id_with_addresses_v1_1(self): - private = "192.168.0.3" - public = ["1.2.3.4"] - new_return_server = return_server_with_addresses(private, public) + FLAGS.use_ipv6 = True + interfaces = 
[ + { + 'network': {'label': 'network_1'}, + 'fixed_ips': [ + {'address': '192.168.0.3'}, + {'address': '192.168.0.4'}, + ], + }, + { + 'network': {'label': 'network_2'}, + 'fixed_ips': [ + {'address': '172.19.0.1'}, + {'address': '172.19.0.2'}, + ], + 'fixed_ipv6': '2001:4860::12', + }, + ] + new_return_server = return_server_with_interfaces(interfaces) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.1/servers/1') res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], 1) self.assertEqual(res_dict['server']['name'], 'server1') addresses = res_dict['server']['addresses'] - self.assertEqual(len(addresses["public"]), len(public)) - self.assertEqual(addresses["public"][0], - {"version": 4, "addr": public[0]}) - self.assertEqual(len(addresses["private"]), 1) - self.assertEqual(addresses["private"][0], - {"version": 4, "addr": private}) + expected = { + 'network_1': [ + {'addr': '192.168.0.3', 'version': 4}, + {'addr': '192.168.0.4', 'version': 4}, + ], + 'network_2': [ + {'addr': '172.19.0.1', 'version': 4}, + {'addr': '172.19.0.2', 'version': 4}, + {'addr': '2001:4860::12', 'version': 6}, + ], + } + + self.assertEqual(addresses, expected) + + def test_get_server_by_id_with_addresses_v1_1_ipv6_disabled(self): + FLAGS.use_ipv6 = False + interfaces = [ + { + 'network': {'label': 'network_1'}, + 'fixed_ips': [ + {'address': '192.168.0.3'}, + {'address': '192.168.0.4'}, + ], + }, + { + 'network': {'label': 'network_2'}, + 'fixed_ips': [ + {'address': '172.19.0.1'}, + {'address': '172.19.0.2'}, + ], + 'fixed_ipv6': '2001:4860::12', + }, + ] + new_return_server = return_server_with_interfaces(interfaces) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + req = webob.Request.blank('/v1.1/servers/1') + res = req.get_response(fakes.wsgi_app()) + + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + 
self.assertEqual(res_dict['server']['name'], 'server1') + addresses = res_dict['server']['addresses'] + expected = { + 'network_1': [ + {'addr': '192.168.0.3', 'version': 4}, + {'addr': '192.168.0.4', 'version': 4}, + ], + 'network_2': [ + {'addr': '172.19.0.1', 'version': 4}, + {'addr': '172.19.0.2', 'version': 4}, + ], + } + + self.assertEqual(addresses, expected) + + def test_get_server_addresses_v1_1(self): + FLAGS.use_ipv6 = True + interfaces = [ + { + 'network': {'label': 'network_1'}, + 'fixed_ips': [ + {'address': '192.168.0.3'}, + {'address': '192.168.0.4'}, + ], + }, + { + 'network': {'label': 'network_2'}, + 'fixed_ips': [ + { + 'address': '172.19.0.1', + 'floating_ips': [ + {'address': '1.2.3.4'}, + ], + }, + {'address': '172.19.0.2'}, + ], + 'fixed_ipv6': '2001:4860::12', + }, + ] + + _return_vifs = return_virtual_interface_by_instance(interfaces) + self.stubs.Set(nova.db.api, + 'virtual_interface_get_by_instance', + _return_vifs) + + req = webob.Request.blank('/v1.1/servers/1/ips') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + expected = { + 'addresses': { + 'network_1': [ + {'version': 4, 'addr': '192.168.0.3'}, + {'version': 4, 'addr': '192.168.0.4'}, + ], + 'network_2': [ + {'version': 4, 'addr': '172.19.0.1'}, + {'version': 4, 'addr': '1.2.3.4'}, + {'version': 4, 'addr': '172.19.0.2'}, + {'version': 6, 'addr': '2001:4860::12'}, + ], + }, + } + + self.assertEqual(res_dict, expected) + + def test_get_server_addresses_single_network_v1_1(self): + FLAGS.use_ipv6 = True + interfaces = [ + { + 'network': {'label': 'network_1'}, + 'fixed_ips': [ + {'address': '192.168.0.3'}, + {'address': '192.168.0.4'}, + ], + }, + { + 'network': {'label': 'network_2'}, + 'fixed_ips': [ + { + 'address': '172.19.0.1', + 'floating_ips': [ + {'address': '1.2.3.4'}, + ], + }, + {'address': '172.19.0.2'}, + ], + 'fixed_ipv6': '2001:4860::12', + }, + ] + _return_vifs = return_virtual_interface_by_instance(interfaces) + 
self.stubs.Set(nova.db.api, + 'virtual_interface_get_by_instance', + _return_vifs) + + req = webob.Request.blank('/v1.1/servers/1/ips/network_2') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + expected = { + 'network_2': [ + {'version': 4, 'addr': '172.19.0.1'}, + {'version': 4, 'addr': '1.2.3.4'}, + {'version': 4, 'addr': '172.19.0.2'}, + {'version': 6, 'addr': '2001:4860::12'}, + ], + } + self.assertEqual(res_dict, expected) + + def test_get_server_addresses_nonexistant_network_v1_1(self): + _return_vifs = return_virtual_interface_by_instance([]) + self.stubs.Set(nova.db.api, + 'virtual_interface_get_by_instance', + _return_vifs) + + req = webob.Request.blank('/v1.1/servers/1/ips/network_0') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) + + def test_get_server_addresses_nonexistant_server_v1_1(self): + _return_vifs = return_virtual_interface_instance_nonexistant([]) + self.stubs.Set(nova.db.api, + 'virtual_interface_get_by_instance', + _return_vifs) + + req = webob.Request.blank('/v1.1/servers/600/ips') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) def test_get_server_list(self): req = webob.Request.blank('/v1.0/servers') @@ -514,13 +712,7 @@ class ServersTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/servers/%d" % (i,), - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/servers/%d" % (i,), + "href": "http://localhost/servers/%d" % (i,), }, ] @@ -596,7 +788,7 @@ class ServersTest(test.TestCase): def fake_method(*args, **kwargs): pass - def project_get_network(context, user_id): + def project_get_networks(context, user_id): return dict(id='1', host='localhost') def queue_get_for(context, *args): @@ -608,7 +800,8 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - 
self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) + self.stubs.Set(nova.db.api, 'project_get_networks', + project_get_networks) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) self.stubs.Set(nova.rpc, 'call', fake_method) @@ -749,6 +942,18 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_create_instance_no_server_entity(self): + self._setup_for_create_instance() + + body = {} + + req = webob.Request.blank('/v1.0/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 422) + def test_create_instance_whitespace_name(self): self._setup_for_create_instance() @@ -797,13 +1002,45 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) self.assertEqual(1, server['id']) self.assertEqual(flavor_ref, server['flavorRef']) self.assertEqual(image_href, server['imageRef']) - self.assertEqual(res.status_int, 200) + + def test_create_instance_v1_1_invalid_flavor_href(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/asdf' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + metadata={'hello': 'world', 'open': 'stack'}, + personality={})) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_1_bad_flavor_href(self): + self._setup_for_create_instance() + + 
image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/17' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + metadata={'hello': 'world', 'open': 'stack'}, + personality={})) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) def test_create_instance_v1_1_bad_href(self): self._setup_for_create_instance() @@ -911,11 +1148,11 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) - def test_update_no_body(self): + def test_update_server_no_body(self): req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 422) + self.assertEqual(res.status_int, 400) def test_update_nonstring_name(self): """ Confirm that update is filtering params """ @@ -977,6 +1214,21 @@ class ServersTest(test.TestCase): self.assertEqual(mock_method.instance_id, '1') self.assertEqual(mock_method.password, 'bacon') + def test_update_server_no_body_v1_1(self): + req = webob.Request.blank('/v1.0/servers/1') + req.method = 'PUT' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_update_server_name_v1_1(self): + req = webob.Request.blank('/v1.1/servers/1') + req.method = 'PUT' + req.content_type = 'application/json' + req.body = json.dumps({'server': {'name': 'new-name'}}) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) + self.assertEqual(res.body, '') + def test_update_server_adminPass_ignored_v1_1(self): inst_dict = dict(name='server_test', adminPass='bacon') self.body = json.dumps(dict(server=inst_dict)) @@ -995,6 +1247,7 @@ class ServersTest(test.TestCase): req.body = self.body res = req.get_response(fakes.wsgi_app()) 
self.assertEqual(res.status_int, 204) + self.assertEqual(res.body, '') def test_create_backup_schedules(self): req = webob.Request.blank('/v1.0/servers/1/backup_schedule') @@ -1443,6 +1696,57 @@ class ServersTest(test.TestCase): self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) + def test_rescue_accepted(self): + FLAGS.allow_admin_api = True + body = {} + + self.called = False + + def rescue_mock(*args, **kwargs): + self.called = True + + self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock) + req = webob.Request.blank('/v1.0/servers/1/rescue') + req.method = 'POST' + req.content_type = 'application/json' + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(self.called, True) + self.assertEqual(res.status_int, 202) + + def test_rescue_raises_handled(self): + FLAGS.allow_admin_api = True + body = {} + + def rescue_mock(*args, **kwargs): + raise Exception('Who cares?') + + self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock) + req = webob.Request.blank('/v1.0/servers/1/rescue') + req.method = 'POST' + req.content_type = 'application/json' + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 422) + + def test_delete_server_instance_v1_1(self): + req = webob.Request.blank('/v1.1/servers/1') + req.method = 'DELETE' + + self.server_delete_called = False + + def instance_destroy_mock(context, id): + self.server_delete_called = True + + self.stubs.Set(nova.db.api, 'instance_destroy', + instance_destroy_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) + self.assertEqual(self.server_delete_called, True) + def test_resize_server(self): req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) @@ -1499,7 +1803,7 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) + self.assertEqual(res.status_int, 500) 
def test_resized_server_has_correct_status(self): req = self.webreq('/1', 'GET') @@ -1567,6 +1871,23 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_migrate_server(self): + """This is basically the same as resize, only we provide the `migrate` + attribute in the body's dict. + """ + req = self.webreq('/1/action', 'POST', dict(migrate=None)) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + def test_shutdown_status(self): new_server = return_server_with_power_state(power_state.SHUTDOWN) self.stubs.Set(nova.db.api, 'instance_get', new_server) @@ -1601,7 +1922,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "imageId": "1", "flavorId": "1", }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_metadata(self): serial_request = """ @@ -1616,7 +1937,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "flavorId": "1", "metadata": {}, }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_personality(self): serial_request = """ @@ -1631,7 +1952,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "flavorId": "1", "personality": [], }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_metadata_and_personality(self): serial_request = """ @@ -1648,7 +1969,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "metadata": {}, "personality": [], }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_metadata_and_personality_reversed(self): serial_request = 
""" @@ -1665,7 +1986,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "metadata": {}, "personality": [], }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_one_personality(self): serial_request = """ @@ -1677,7 +1998,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_two_personalities(self): serial_request = """ @@ -1688,7 +2009,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}, {"path": "/etc/sudoers", "contents": "abcd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_second_personality_node_ignored(self): serial_request = """ @@ -1703,7 +2024,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_one_personality_missing_path(self): serial_request = """ @@ -1712,7 +2033,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <personality><file>aabbccdd</file></personality></server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"contents": "aabbccdd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], 
expected) def test_request_with_one_personality_empty_contents(self): serial_request = """ @@ -1721,7 +2042,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <personality><file path="/etc/conf"></file></personality></server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_one_personality_empty_contents_variation(self): serial_request = """ @@ -1730,7 +2051,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <personality><file path="/etc/conf"/></personality></server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_one_metadata(self): serial_request = """ @@ -1742,7 +2063,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_two_metadata(self): serial_request = """ @@ -1755,7 +2076,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta", "foo": "bar"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_metadata_missing_value(self): serial_request = """ @@ -1767,7 +2088,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = 
self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": ""} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_two_metadata_missing_value(self): serial_request = """ @@ -1780,7 +2101,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "", "delta": ""} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_metadata_missing_key(self): serial_request = """ @@ -1792,7 +2113,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "beta"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_two_metadata_missing_key(self): serial_request = """ @@ -1805,7 +2126,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "gamma"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_metadata_duplicate_key(self): serial_request = """ @@ -1818,7 +2139,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"foo": "baz"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_canonical_request_from_docs(self): serial_request = """ @@ -1864,7 +2185,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", ], }} request = self.deserializer.deserialize(serial_request, 'create') 
- self.assertEqual(request, expected) + self.assertEqual(request['body'], expected) def test_request_xmlser_with_flavor_image_href(self): serial_request = """ @@ -1874,12 +2195,68 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", flavorRef="http://localhost:8774/v1.1/flavors/1"> </server>""" request = self.deserializer.deserialize(serial_request, 'create') - self.assertEquals(request["server"]["flavorRef"], + self.assertEquals(request['body']["server"]["flavorRef"], "http://localhost:8774/v1.1/flavors/1") - self.assertEquals(request["server"]["imageRef"], + self.assertEquals(request['body']["server"]["imageRef"], "http://localhost:8774/v1.1/images/1") +class TextAddressesXMLSerialization(test.TestCase): + + serializer = nova.api.openstack.ips.IPXMLSerializer() + + def test_show(self): + fixture = { + 'network_2': [ + {'addr': '192.168.0.1', 'version': 4}, + {'addr': 'fe80::beef', 'version': 6}, + ], + } + output = self.serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <network xmlns="http://docs.openstack.org/compute/api/v1.1" + id="network_2"> + <ip version="4" addr="192.168.0.1"/> + <ip version="6" addr="fe80::beef"/> + </network> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index(self): + fixture = { + 'addresses': { + 'network_1': [ + {'addr': '192.168.0.3', 'version': 4}, + {'addr': '192.168.0.5', 'version': 4}, + ], + 'network_2': [ + {'addr': '192.168.0.1', 'version': 4}, + {'addr': 'fe80::beef', 'version': 6}, + ], + }, + } + output = self.serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <addresses xmlns="http://docs.openstack.org/compute/api/v1.1"> + <network id="network_2"> + <ip version="4" addr="192.168.0.1"/> + <ip version="6" addr="fe80::beef"/> + </network> + <network id="network_1"> + <ip version="4" addr="192.168.0.3"/> + <ip version="4" 
addr="192.168.0.5"/> + </network> + </addresses> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + class TestServerInstanceCreation(test.TestCase): def setUp(self): @@ -1941,7 +2318,7 @@ class TestServerInstanceCreation(test.TestCase): def _get_create_request_json(self, body_dict): req = webob.Request.blank('/v1.0/servers') - req.content_type = 'application/json' + req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body_dict) return req diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index fd8d50904..da964ee1f 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -21,6 +21,7 @@ import webob from nova import context from nova import test from nova.tests.api.openstack import fakes +from nova.api.openstack import versions from nova.api.openstack import views @@ -43,19 +44,21 @@ class VersionsTest(test.TestCase): { "id": "v1.1", "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.1", + "href": "http://localhost/v1.1/", }], }, { "id": "v1.0", "status": "DEPRECATED", + "updated": "2010-10-09T11:30:00Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.0", + "href": "http://localhost/v1.0/", }], }, ] @@ -69,15 +72,12 @@ class VersionsTest(test.TestCase): self.assertEqual(res.content_type, "application/xml") expected = """<versions> - <version id="v1.1" status="CURRENT"> - <links> - <link href="http://localhost/v1.1" rel="self"/> - </links> + <version id="v1.1" status="CURRENT" updated="2011-07-18T11:30:00Z"> + <atom:link href="http://localhost/v1.1/" rel="self"/> </version> - <version id="v1.0" status="DEPRECATED"> - <links> - <link href="http://localhost/v1.0" rel="self"/> - </links> + <version id="v1.0" status="DEPRECATED" + updated="2010-10-09T11:30:00Z"> + <atom:link href="http://localhost/v1.0/" rel="self"/> </version> 
</versions>""".replace(" ", "").replace("\n", "") @@ -85,21 +85,64 @@ class VersionsTest(test.TestCase): self.assertEqual(expected, actual) + def test_get_version_list_atom(self): + req = webob.Request.blank('/') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/atom+xml") + + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text">Available API Versions</title> + <updated>2011-07-18T11:30:00Z</updated> + <id>http://localhost/</id> + <author> + <name>Rackspace</name> + <uri>http://www.rackspace.com/</uri> + </author> + <link href="http://localhost/" rel="self"/> + <entry> + <id>http://localhost/v1.1/</id> + <title type="text">Version v1.1</title> + <updated>2011-07-18T11:30:00Z</updated> + <link href="http://localhost/v1.1/" rel="self"/> + <content type="text"> + Version v1.1 CURRENT (2011-07-18T11:30:00Z) + </content> + </entry> + <entry> + <id>http://localhost/v1.0/</id> + <title type="text">Version v1.0</title> + <updated>2010-10-09T11:30:00Z</updated> + <link href="http://localhost/v1.0/" rel="self"/> + <content type="text"> + Version v1.0 DEPRECATED (2010-10-09T11:30:00Z) + </content> + </entry> + </feed> + """.replace(" ", "").replace("\n", "") + + actual = res.body.replace(" ", "").replace("\n", "") + + self.assertEqual(expected, actual) + def test_view_builder(self): base_url = "http://example.org/" version_data = { "id": "3.2.1", "status": "CURRENT", - } + "updated": "2011-07-18T11:30:00Z"} expected = { "id": "3.2.1", "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", "links": [ { "rel": "self", - "href": "http://example.org/3.2.1", + "href": "http://example.org/3.2.1/", }, ], } @@ -113,9 +156,99 @@ class VersionsTest(test.TestCase): base_url = "http://example.org/app/" version_number = "v1.4.6" - expected = "http://example.org/app/v1.4.6" + expected = "http://example.org/app/v1.4.6/" builder = 
views.versions.ViewBuilder(base_url) actual = builder.generate_href(version_number) self.assertEqual(actual, expected) + + def test_xml_serializer(self): + versions_data = { + 'versions': [ + { + "id": "2.7.1", + "updated": "2011-07-18T11:30:00Z", + "status": "DEPRECATED", + "links": [ + { + "rel": "self", + "href": "http://test/2.7.1", + }, + ], + }, + ] + } + + expected = """ + <versions> + <version id="2.7.1" status="DEPRECATED" + updated="2011-07-18T11:30:00Z"> + <atom:link href="http://test/2.7.1" rel="self"/> + </version> + </versions>""".replace(" ", "").replace("\n", "") + + serializer = versions.VersionsXMLSerializer() + response = serializer.default(versions_data) + response = response.replace(" ", "").replace("\n", "") + self.assertEqual(expected, response) + + def test_atom_serializer(self): + versions_data = { + 'versions': [ + { + "id": "2.9.8", + "updated": "2011-07-20T11:40:00Z", + "status": "CURRENT", + "links": [ + { + "rel": "self", + "href": "http://test/2.9.8", + }, + ], + }, + ] + } + + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text"> + Available API Versions + </title> + <updated> + 2011-07-20T11:40:00Z + </updated> + <id> + http://test/ + </id> + <author> + <name> + Rackspace + </name> + <uri> + http://www.rackspace.com/ + </uri> + </author> + <link href="http://test/" rel="self"/> + <entry> + <id> + http://test/2.9.8 + </id> + <title type="text"> + Version 2.9.8 + </title> + <updated> + 2011-07-20T11:40:00Z + </updated> + <link href="http://test/2.9.8" rel="self"/> + <content type="text"> + Version 2.9.8 CURRENT (2011-07-20T11:40:00Z) + </content> + </entry> + </feed>""".replace(" ", "").replace("\n", "") + + serializer = versions.VersionsAtomSerializer() + response = serializer.default(versions_data) + print response + response = response.replace(" ", "").replace("\n", "") + self.assertEqual(expected, response) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 
73a26a087..6dea78d17 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -12,8 +12,7 @@ class RequestTest(test.TestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = "<body />" - self.assertRaises(exception.InvalidContentType, - request.get_content_type) + self.assertEqual(None, request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') @@ -76,24 +75,48 @@ class RequestTest(test.TestCase): self.assertEqual(result, "application/json") -class DictSerializerTest(test.TestCase): +class ActionDispatcherTest(test.TestCase): def test_dispatch(self): - serializer = wsgi.DictSerializer() + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: 'pants' + self.assertEqual(serializer.dispatch({}, action='create'), 'pants') + + def test_dispatch_action_None(self): + serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' - self.assertEqual(serializer.serialize({}, 'create'), 'pants') + self.assertEqual(serializer.dispatch({}, action=None), 'trousers') def test_dispatch_default(self): - serializer = wsgi.DictSerializer() + serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' - self.assertEqual(serializer.serialize({}, 'update'), 'trousers') + self.assertEqual(serializer.dispatch({}, action='update'), 'trousers') - def test_dispatch_action_None(self): + +class ResponseHeadersSerializerTest(test.TestCase): + def test_default(self): + serializer = wsgi.ResponseHeadersSerializer() + response = webob.Response() + serializer.serialize(response, {'v': '123'}, 'asdf') + self.assertEqual(response.status_int, 200) + + def test_custom(self): + class Serializer(wsgi.ResponseHeadersSerializer): + def update(self, response, data): + response.status_int = 404 + 
response.headers['X-Custom-Header'] = data['v'] + serializer = Serializer() + response = webob.Response() + serializer.serialize(response, {'v': '123'}, 'update') + self.assertEqual(response.status_int, 404) + self.assertEqual(response.headers['X-Custom-Header'], '123') + + +class DictSerializerTest(test.TestCase): + def test_dispatch_default(self): serializer = wsgi.DictSerializer() - serializer.create = lambda x: 'pants' - serializer.default = lambda x: 'trousers' - self.assertEqual(serializer.serialize({}, None), 'trousers') + self.assertEqual(serializer.serialize({}, 'update'), '') class XMLDictSerializerTest(test.TestCase): @@ -117,23 +140,9 @@ class JSONDictSerializerTest(test.TestCase): class TextDeserializerTest(test.TestCase): - def test_dispatch(self): - deserializer = wsgi.TextDeserializer() - deserializer.create = lambda x: 'pants' - deserializer.default = lambda x: 'trousers' - self.assertEqual(deserializer.deserialize({}, 'create'), 'pants') - def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() - deserializer.create = lambda x: 'pants' - deserializer.default = lambda x: 'trousers' - self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers') - - def test_dispatch_action_None(self): - deserializer = wsgi.TextDeserializer() - deserializer.create = lambda x: 'pants' - deserializer.default = lambda x: 'trousers' - self.assertEqual(deserializer.deserialize({}, None), 'trousers') + self.assertEqual(deserializer.deserialize({}, 'update'), {}) class JSONDeserializerTest(test.TestCase): @@ -144,12 +153,17 @@ class JSONDeserializerTest(test.TestCase): "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) + as_dict = { + 'body': { + 'a': { + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], + 'd': {'e': '1'}, + 'f': '1', + }, + }, + } deserializer = wsgi.JSONDeserializer() 
self.assertEqual(deserializer.deserialize(data), as_dict) @@ -163,23 +177,44 @@ class XMLDeserializerTest(test.TestCase): <f>1</f> </a> """.strip() - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) + as_dict = { + 'body': { + 'a': { + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], + 'd': {'e': '1'}, + 'f': '1', + }, + }, + } metadata = {'plurals': {'bs': 'b', 'ts': 't'}} deserializer = wsgi.XMLDeserializer(metadata=metadata) self.assertEqual(deserializer.deserialize(xml), as_dict) def test_xml_empty(self): xml = """<a></a>""" - as_dict = {"a": {}} + as_dict = {"body": {"a": {}}} deserializer = wsgi.XMLDeserializer() self.assertEqual(deserializer.deserialize(xml), as_dict) +class RequestHeadersDeserializerTest(test.TestCase): + def test_default(self): + deserializer = wsgi.RequestHeadersDeserializer() + req = wsgi.Request.blank('/') + self.assertEqual(deserializer.deserialize(req, 'asdf'), {}) + + def test_custom(self): + class Deserializer(wsgi.RequestHeadersDeserializer): + def update(self, request): + return {'a': request.headers['X-Custom-Header']} + deserializer = Deserializer() + req = wsgi.Request.blank('/') + req.headers['X-Custom-Header'] = 'b' + self.assertEqual(deserializer.deserialize(req, 'update'), {'a': 'b'}) + + class ResponseSerializerTest(test.TestCase): def setUp(self): class JSONSerializer(object): @@ -190,29 +225,43 @@ class ResponseSerializerTest(test.TestCase): def serialize(self, data, action='default'): return 'pew_xml' - self.serializers = { + class HeadersSerializer(object): + def serialize(self, response, data, action): + response.status_int = 404 + + self.body_serializers = { 'application/json': JSONSerializer(), 'application/XML': XMLSerializer(), } - self.serializer = wsgi.ResponseSerializer(serializers=self.serializers) + self.serializer = wsgi.ResponseSerializer(self.body_serializers, + HeadersSerializer()) def tearDown(self): pass def 
test_get_serializer(self): - self.assertEqual(self.serializer.get_serializer('application/json'), - self.serializers['application/json']) + ctype = 'application/json' + self.assertEqual(self.serializer.get_body_serializer(ctype), + self.body_serializers[ctype]) def test_get_serializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.serializer.get_serializer, + self.serializer.get_body_serializer, 'application/unknown') def test_serialize_response(self): response = self.serializer.serialize({}, 'application/json') self.assertEqual(response.headers['Content-Type'], 'application/json') self.assertEqual(response.body, 'pew_json') + self.assertEqual(response.status_int, 404) + + def test_serialize_response_None(self): + response = self.serializer.serialize(None, 'application/json') + print response + self.assertEqual(response.headers['Content-Type'], 'application/json') + self.assertEqual(response.body, '') + self.assertEqual(response.status_int, 404) def test_serialize_response_dict_to_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, @@ -230,24 +279,23 @@ class RequestDeserializerTest(test.TestCase): def deserialize(self, data, action='default'): return 'pew_xml' - self.deserializers = { + self.body_deserializers = { 'application/json': JSONDeserializer(), 'application/XML': XMLDeserializer(), } - self.deserializer = wsgi.RequestDeserializer( - deserializers=self.deserializers) + self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) def tearDown(self): pass def test_get_deserializer(self): - expected = self.deserializer.get_deserializer('application/json') - self.assertEqual(expected, self.deserializers['application/json']) + expected = self.deserializer.get_body_deserializer('application/json') + self.assertEqual(expected, self.body_deserializers['application/json']) def test_get_deserializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - 
self.deserializer.get_deserializer, + self.deserializer.get_body_deserializer, 'application/unknown') def test_get_expected_content_type(self): diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 098577e4c..6a6e13d93 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -34,7 +34,7 @@ FLAGS.verbose = True def zone_get(context, zone_id): return dict(id=1, api_url='http://example.com', username='bob', - password='xxx') + password='xxx', weight_scale=1.0, weight_offset=0.0) def zone_create(context, values): @@ -57,9 +57,9 @@ def zone_delete(context, zone_id): def zone_get_all_scheduler(*args): return [ dict(id=1, api_url='http://example.com', username='bob', - password='xxx'), + password='xxx', weight_scale=1.0, weight_offset=0.0), dict(id=2, api_url='http://example.org', username='alice', - password='qwerty'), + password='qwerty', weight_scale=1.0, weight_offset=0.0), ] @@ -70,9 +70,9 @@ def zone_get_all_scheduler_empty(*args): def zone_get_all_db(context): return [ dict(id=1, api_url='http://example.com', username='bob', - password='xxx'), + password='xxx', weight_scale=1.0, weight_offset=0.0), dict(id=2, api_url='http://example.org', username='alice', - password='qwerty'), + password='qwerty', weight_scale=1.0, weight_offset=0.0), ] diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 8bdea359a..19028a451 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -20,10 +20,327 @@ import time from nova import db +from nova import exception from nova import test from nova import utils +class FakeModel(object): + """Stubs out for model.""" + def __init__(self, values): + self.values = values + + def __getattr__(self, name): + return self.values[name] + + def __getitem__(self, key): + if key in self.values: + return self.values[key] + else: + raise NotImplementedError() + + def __repr__(self): + return '<FakeModel: %s>' % self.values + + +def 
stub_out(stubs, funcs): + """Set the stubs in mapping in the db api.""" + for func in funcs: + func_name = '_'.join(func.__name__.split('_')[1:]) + stubs.Set(db, func_name, func) + + +def stub_out_db_network_api(stubs): + network_fields = {'id': 0, + 'cidr': '192.168.0.0/24', + 'netmask': '255.255.255.0', + 'cidr_v6': 'dead:beef::/64', + 'netmask_v6': '64', + 'project_id': 'fake', + 'label': 'fake', + 'gateway': '192.168.0.1', + 'bridge': 'fa0', + 'bridge_interface': 'fake_fa0', + 'broadcast': '192.168.0.255', + 'gateway_v6': 'dead:beef::1', + 'dns': '192.168.0.1', + 'vlan': None, + 'host': None, + 'injected': False, + 'vpn_public_address': '192.168.0.2'} + + fixed_ip_fields = {'id': 0, + 'network_id': 0, + 'network': FakeModel(network_fields), + 'address': '192.168.0.100', + 'instance': False, + 'instance_id': 0, + 'allocated': False, + 'virtual_interface_id': 0, + 'virtual_interface': None, + 'floating_ips': []} + + flavor_fields = {'id': 0, + 'rxtx_cap': 3} + + floating_ip_fields = {'id': 0, + 'address': '192.168.1.100', + 'fixed_ip_id': None, + 'fixed_ip': None, + 'project_id': None, + 'auto_assigned': False} + + virtual_interface_fields = {'id': 0, + 'address': 'DE:AD:BE:EF:00:00', + 'network_id': 0, + 'instance_id': 0, + 'network': FakeModel(network_fields)} + + fixed_ips = [fixed_ip_fields] + floating_ips = [floating_ip_fields] + virtual_interfacees = [virtual_interface_fields] + networks = [network_fields] + + def fake_floating_ip_allocate_address(context, project_id): + ips = filter(lambda i: i['fixed_ip_id'] == None \ + and i['project_id'] == None, + floating_ips) + if not ips: + raise exception.NoMoreFloatingIps() + ips[0]['project_id'] = project_id + return FakeModel(ips[0]) + + def fake_floating_ip_deallocate(context, address): + ips = filter(lambda i: i['address'] == address, + floating_ips) + if ips: + ips[0]['project_id'] = None + ips[0]['auto_assigned'] = False + + def fake_floating_ip_disassociate(context, address): + ips = filter(lambda i: 
i['address'] == address, + floating_ips) + if ips: + fixed_ip_address = None + if ips[0]['fixed_ip']: + fixed_ip_address = ips[0]['fixed_ip']['address'] + ips[0]['fixed_ip'] = None + return fixed_ip_address + + def fake_floating_ip_fixed_ip_associate(context, floating_address, + fixed_address): + float = filter(lambda i: i['address'] == floating_address, + floating_ips) + fixed = filter(lambda i: i['address'] == fixed_address, + fixed_ips) + if float and fixed: + float[0]['fixed_ip'] = fixed[0] + float[0]['fixed_ip_id'] = fixed[0]['id'] + + def fake_floating_ip_get_all_by_host(context, host): + # TODO(jkoelker): Once we get the patches that remove host from + # the floating_ip table, we'll need to stub + # this out + pass + + def fake_floating_ip_get_by_address(context, address): + if isinstance(address, FakeModel): + # NOTE(tr3buchet): yo dawg, i heard you like addresses + address = address['address'] + ips = filter(lambda i: i['address'] == address, + floating_ips) + if not ips: + raise exception.FloatingIpNotFoundForAddress(address=address) + return FakeModel(ips[0]) + + def fake_floating_ip_set_auto_assigned(contex, address): + ips = filter(lambda i: i['address'] == address, + floating_ips) + if ips: + ips[0]['auto_assigned'] = True + + def fake_fixed_ip_associate(context, address, instance_id): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if not ips: + raise exception.NoMoreFixedIps() + ips[0]['instance'] = True + ips[0]['instance_id'] = instance_id + + def fake_fixed_ip_associate_pool(context, network_id, instance_id): + ips = filter(lambda i: (i['network_id'] == network_id \ + or i['network_id'] is None) \ + and not i['instance'], + fixed_ips) + if not ips: + raise exception.NoMoreFixedIps() + ips[0]['instance'] = True + ips[0]['instance_id'] = instance_id + return ips[0]['address'] + + def fake_fixed_ip_create(context, values): + ip = dict(fixed_ip_fields) + ip['id'] = max([i['id'] for i in fixed_ips] or [-1]) + 1 + for key in values: + 
ip[key] = values[key] + return ip['address'] + + def fake_fixed_ip_disassociate(context, address): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + ips[0]['instance_id'] = None + ips[0]['instance'] = None + ips[0]['virtual_interface'] = None + ips[0]['virtual_interface_id'] = None + + def fake_fixed_ip_disassociate_all_by_timeout(context, host, time): + return 0 + + def fake_fixed_ip_get_by_instance(context, instance_id): + ips = filter(lambda i: i['instance_id'] == instance_id, + fixed_ips) + return [FakeModel(i) for i in ips] + + def fake_fixed_ip_get_by_address(context, address): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + return FakeModel(ips[0]) + + def fake_fixed_ip_get_network(context, address): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + nets = filter(lambda n: n['id'] == ips[0]['network_id'], + networks) + if nets: + return FakeModel(nets[0]) + + def fake_fixed_ip_update(context, address, values): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + for key in values: + ips[0][key] = values[key] + if key == 'virtual_interface_id': + vif = filter(lambda x: x['id'] == values[key], + virtual_interfacees) + if not vif: + continue + fixed_ip_fields['virtual_interface'] = FakeModel(vif[0]) + + def fake_instance_type_get(context, id): + if flavor_fields['id'] == id: + return FakeModel(flavor_fields) + + def fake_virtual_interface_create(context, values): + vif = dict(virtual_interface_fields) + vif['id'] = max([m['id'] for m in virtual_interfacees] or [-1]) + 1 + for key in values: + vif[key] = values[key] + return FakeModel(vif) + + def fake_virtual_interface_delete_by_instance(context, instance_id): + addresses = [m for m in virtual_interfacees \ + if m['instance_id'] == instance_id] + try: + for address in addresses: + virtual_interfacees.remove(address) + except ValueError: + pass + + def fake_virtual_interface_get_by_instance(context, instance_id): + 
return [FakeModel(m) for m in virtual_interfacees \ + if m['instance_id'] == instance_id] + + def fake_virtual_interface_get_by_instance_and_network(context, + instance_id, + network_id): + vif = filter(lambda m: m['instance_id'] == instance_id and \ + m['network_id'] == network_id, + virtual_interfacees) + if not vif: + return None + return FakeModel(vif[0]) + + def fake_network_create_safe(context, values): + net = dict(network_fields) + net['id'] = max([n['id'] for n in networks] or [-1]) + 1 + for key in values: + net[key] = values[key] + return FakeModel(net) + + def fake_network_get(context, network_id): + net = filter(lambda n: n['id'] == network_id, networks) + if not net: + return None + return FakeModel(net[0]) + + def fake_network_get_all(context): + return [FakeModel(n) for n in networks] + + def fake_network_get_all_by_host(context, host): + nets = filter(lambda n: n['host'] == host, networks) + return [FakeModel(n) for n in nets] + + def fake_network_get_all_by_instance(context, instance_id): + nets = filter(lambda n: n['instance_id'] == instance_id, networks) + return [FakeModel(n) for n in nets] + + def fake_network_set_host(context, network_id, host_id): + nets = filter(lambda n: n['id'] == network_id, networks) + for net in nets: + net['host'] = host_id + return host_id + + def fake_network_update(context, network_id, values): + nets = filter(lambda n: n['id'] == network_id, networks) + for net in nets: + for key in values: + net[key] = values[key] + + def fake_project_get_networks(context, project_id): + return [FakeModel(n) for n in networks \ + if n['project_id'] == project_id] + + def fake_queue_get_for(context, topic, node): + return "%s.%s" % (topic, node) + + funcs = [fake_floating_ip_allocate_address, + fake_floating_ip_deallocate, + fake_floating_ip_disassociate, + fake_floating_ip_fixed_ip_associate, + fake_floating_ip_get_all_by_host, + fake_floating_ip_get_by_address, + fake_floating_ip_set_auto_assigned, + fake_fixed_ip_associate, + 
fake_fixed_ip_associate_pool, + fake_fixed_ip_create, + fake_fixed_ip_disassociate, + fake_fixed_ip_disassociate_all_by_timeout, + fake_fixed_ip_get_by_instance, + fake_fixed_ip_get_by_address, + fake_fixed_ip_get_network, + fake_fixed_ip_update, + fake_instance_type_get, + fake_virtual_interface_create, + fake_virtual_interface_delete_by_instance, + fake_virtual_interface_get_by_instance, + fake_virtual_interface_get_by_instance_and_network, + fake_network_create_safe, + fake_network_get, + fake_network_get_all, + fake_network_get_all_by_host, + fake_network_get_all_by_instance, + fake_network_set_host, + fake_network_update, + fake_project_get_networks, + fake_queue_get_for] + + stub_out(stubs, funcs) + + def stub_out_db_instance_api(stubs, injected=True): """Stubs out the db API for creating Instances.""" @@ -92,27 +409,13 @@ def stub_out_db_instance_api(stubs, injected=True): 'address_v6': 'fe80::a00:3', 'network_id': 'fake_flat'} - class FakeModel(object): - """Stubs out for model.""" - def __init__(self, values): - self.values = values - - def __getattr__(self, name): - return self.values[name] - - def __getitem__(self, key): - if key in self.values: - return self.values[key] - else: - raise NotImplementedError() - def fake_instance_type_get_all(context, inactive=0): return INSTANCE_TYPES def fake_instance_type_get_by_name(context, name): return INSTANCE_TYPES[name] - def fake_instance_type_get_by_id(context, id): + def fake_instance_type_get(context, id): for name, inst_type in INSTANCE_TYPES.iteritems(): if str(inst_type['id']) == str(id): return inst_type @@ -132,26 +435,22 @@ def stub_out_db_instance_api(stubs, injected=True): else: return [FakeModel(flat_network_fields)] - def fake_instance_get_fixed_address(context, instance_id): - return FakeModel(fixed_ip_fields).address + def fake_instance_get_fixed_addresses(context, instance_id): + return [FakeModel(fixed_ip_fields).address] - def fake_instance_get_fixed_address_v6(context, instance_id): - return 
FakeModel(fixed_ip_fields).address + def fake_instance_get_fixed_addresses_v6(context, instance_id): + return [FakeModel(fixed_ip_fields).address] - def fake_fixed_ip_get_all_by_instance(context, instance_id): + def fake_fixed_ip_get_by_instance(context, instance_id): return [FakeModel(fixed_ip_fields)] - stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance) - stubs.Set(db, 'network_get_all_by_instance', - fake_network_get_all_by_instance) - stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all) - stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name) - stubs.Set(db, 'instance_type_get_by_id', fake_instance_type_get_by_id) - stubs.Set(db, 'instance_get_fixed_address', - fake_instance_get_fixed_address) - stubs.Set(db, 'instance_get_fixed_address_v6', - fake_instance_get_fixed_address_v6) - stubs.Set(db, 'network_get_all_by_instance', - fake_network_get_all_by_instance) - stubs.Set(db, 'fixed_ip_get_all_by_instance', - fake_fixed_ip_get_all_by_instance) + funcs = [fake_network_get_by_instance, + fake_network_get_all_by_instance, + fake_instance_type_get_all, + fake_instance_type_get_by_name, + fake_instance_type_get, + fake_instance_get_fixed_addresses, + fake_instance_get_fixed_addresses_v6, + fake_network_get_all_by_instance, + fake_fixed_ip_get_by_instance] + stub_out(stubs, funcs) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 1e0b90d82..aac3ff330 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -64,8 +64,8 @@ class FakeGlance(object): pass def get_image_meta(self, image_id): - return self.IMAGE_FIXTURES[image_id]['image_meta'] + return self.IMAGE_FIXTURES[int(image_id)]['image_meta'] def get_image(self, image_id): - image = self.IMAGE_FIXTURES[image_id] + image = self.IMAGE_FIXTURES[int(image_id)] return image['image_meta'], image['image_data'] diff --git a/nova/tests/image/__init__.py b/nova/tests/image/__init__.py index b94e2e54e..6dab802f2 100644 --- 
a/nova/tests/image/__init__.py +++ b/nova/tests/image/__init__.py @@ -14,3 +14,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py new file mode 100644 index 000000000..231e109f8 --- /dev/null +++ b/nova/tests/image/test_s3.py @@ -0,0 +1,122 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import context +from nova import flags +from nova import test +from nova.image import s3 + +FLAGS = flags.FLAGS + + +ami_manifest_xml = """<?xml version="1.0" ?> +<manifest> + <version>2011-06-17</version> + <bundler> + <name>test-s3</name> + <version>0</version> + <release>0</release> + </bundler> + <machine_configuration> + <architecture>x86_64</architecture> + <block_device_mapping> + <mapping> + <virtual>ami</virtual> + <device>sda1</device> + </mapping> + <mapping> + <virtual>root</virtual> + <device>/dev/sda1</device> + </mapping> + <mapping> + <virtual>ephemeral0</virtual> + <device>sda2</device> + </mapping> + <mapping> + <virtual>swap</virtual> + <device>sda3</device> + </mapping> + </block_device_mapping> + </machine_configuration> +</manifest> +""" + + +class TestS3ImageService(test.TestCase): + def setUp(self): + super(TestS3ImageService, self).setUp() + self.orig_image_service = FLAGS.image_service + FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.image_service = s3.S3ImageService() + self.context = context.RequestContext(None, None) + + def tearDown(self): + super(TestS3ImageService, self).tearDown() + FLAGS.image_service = self.orig_image_service + + def _assertEqualList(self, list0, list1, keys): + self.assertEqual(len(list0), len(list1)) + key = keys[0] + for x in list0: + self.assertEqual(len(x), len(keys)) + self.assertTrue(key in x) + for y in list1: + self.assertTrue(key in y) + if x[key] == y[key]: + for k in keys: + self.assertEqual(x[k], y[k]) + + def test_s3_create(self): + metadata = {'properties': { + 'root_device_name': '/dev/sda1', + 'block_device_mapping': [ + {'device_name': '/dev/sda1', + 'snapshot_id': 'snap-12345678', + 'delete_on_termination': True}, + {'device_name': '/dev/sda2', + 'virutal_name': 'ephemeral0'}, + {'device_name': '/dev/sdb0', + 'no_device': True}]}} + _manifest, image = self.image_service._s3_parse_manifest( + self.context, metadata, ami_manifest_xml) + image_id = image['id'] + + 
ret_image = self.image_service.show(self.context, image_id) + self.assertTrue('properties' in ret_image) + properties = ret_image['properties'] + + self.assertTrue('mappings' in properties) + mappings = properties['mappings'] + expected_mappings = [ + {"device": "sda1", "virtual": "ami"}, + {"device": "/dev/sda1", "virtual": "root"}, + {"device": "sda2", "virtual": "ephemeral0"}, + {"device": "sda3", "virtual": "swap"}] + self._assertEqualList(mappings, expected_mappings, + ['device', 'virtual']) + + self.assertTrue('block_device_mapping', properties) + block_device_mapping = properties['block_device_mapping'] + expected_bdm = [ + {'device_name': '/dev/sda1', + 'snapshot_id': 'snap-12345678', + 'delete_on_termination': True}, + {'device_name': '/dev/sda2', + 'virutal_name': 'ephemeral0'}, + {'device_name': '/dev/sdb0', + 'no_device': True}] + self.assertEqual(block_device_mapping, expected_bdm) diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py index 10e0a91d7..430af8754 100644 --- a/nova/tests/integrated/__init__.py +++ b/nova/tests/integrated/__init__.py @@ -18,3 +18,5 @@ :mod:`integrated` -- Tests whole systems, using mock services where needed ================================= """ +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index 76c03c5fa..035a35aab 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -71,8 +71,8 @@ class TestOpenStackClient(object): self.auth_uri = auth_uri def request(self, url, method='GET', body=None, headers=None): - if headers is None: - headers = {} + _headers = {'Content-Type': 'application/json'} + _headers.update(headers or {}) parsed_url = urlparse.urlparse(url) port = parsed_url.port @@ -94,9 +94,8 @@ class TestOpenStackClient(object): LOG.info(_("Doing %(method)s on %(relative_url)s") % locals()) if body: 
LOG.info(_("Body: %s") % body) - headers.setdefault('Content-Type', 'application/json') - conn.request(method, relative_url, body, headers) + conn.request(method, relative_url, body, _headers) response = conn.getresponse() return response @@ -173,9 +172,20 @@ class TestOpenStackClient(object): response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) + def api_put(self, relative_uri, body, **kwargs): + kwargs['method'] = 'PUT' + if body: + headers = kwargs.setdefault('headers', {}) + headers['Content-Type'] = 'application/json' + kwargs['body'] = json.dumps(body) + + kwargs.setdefault('check_response_status', [200, 202, 204]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + def api_delete(self, relative_uri, **kwargs): kwargs['method'] = 'DELETE' - kwargs.setdefault('check_response_status', [200, 202]) + kwargs.setdefault('check_response_status', [200, 202, 204]) return self.api_request(relative_uri, **kwargs) def get_server(self, server_id): @@ -188,6 +198,9 @@ class TestOpenStackClient(object): def post_server(self, server): return self.api_post('/servers', server)['server'] + def put_server(self, server_id, server): + return self.api_put('/servers/%s' % server_id, server) + def post_server_action(self, server_id, data): return self.api_post('/servers/%s/action' % server_id, data) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index fcb517cf5..4e8e85c7b 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -285,6 +285,25 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_rename_server(self): + """Test building and renaming a server.""" + + # Create a server + server = self._build_minimal_create_server_request() + created_server = self.api.post_server({'server': server}) + LOG.debug("created_server: %s" % created_server) + server_id = 
created_server['id'] + self.assertTrue(server_id) + + # Rename the server to 'new-name' + self.api.put_server(server_id, {'server': {'name': 'new-name'}}) + + # Check the name of the server + created_server = self.api.get_server(server_id) + self.assertEqual(created_server['name'], 'new-name') + + # Cleanup + self._delete_server(server_id) if __name__ == "__main__": unittest.main() diff --git a/nova/tests/network/__init__.py b/nova/tests/network/__init__.py deleted file mode 100644 index 97f96b6fa..000000000 --- a/nova/tests/network/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Utility methods -""" -import os - -from nova import context -from nova import db -from nova import flags -from nova import log as logging -from nova import utils - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -def binpath(script): - """Returns the absolute path to a script in bin""" - return os.path.abspath(os.path.join(__file__, "../../../../bin", script)) - - -def lease_ip(private_ip): - """Run add command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = (binpath('nova-dhcpbridge'), 'add', - instance_ref['mac_address'], - private_ip, 'fake') - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(*cmd, addl_env=env) - LOG.debug("ISSUE_IP: %s, %s ", out, err) - - -def release_ip(private_ip): - """Run del command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = (binpath('nova-dhcpbridge'), 'del', - instance_ref['mac_address'], - private_ip, 'fake') - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(*cmd, addl_env=env) - LOG.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py deleted file mode 100644 index f65416824..000000000 --- a/nova/tests/network/base.py +++ /dev/null @@ -1,155 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Base class of Unit Tests for all network models -""" -import netaddr -import os - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import ipv6 -from nova import log as logging -from nova import test -from nova import utils -from nova.auth import manager - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -class NetworkTestCase(test.TestCase): - """Test cases for network code""" - def setUp(self): - super(NetworkTestCase, self).setUp() - # NOTE(vish): if you change these flags, make sure to change the - # flags in the corresponding section in nova-dhcpbridge - self.flags(connection_type='fake', - fake_call=True, - fake_network=True) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('netuser', 'netuser', 'netuser') - self.projects = [] - self.network = utils.import_object(FLAGS.network_manager) - self.context = context.RequestContext(project=None, user=self.user) - for i in range(FLAGS.num_networks): - name = 'project%s' % i - project = self.manager.create_project(name, 'netuser', name) - self.projects.append(project) - # create the necessary network data for the project - user_context = context.RequestContext(project=self.projects[i], - user=self.user) - host = self.network.get_network_host(user_context.elevated()) - instance_ref = self._create_instance(0) - self.instance_id = instance_ref['id'] - 
instance_ref = self._create_instance(1) - self.instance2_id = instance_ref['id'] - - def tearDown(self): - # TODO(termie): this should really be instantiating clean datastores - # in between runs, one failure kills all the tests - db.instance_destroy(context.get_admin_context(), self.instance_id) - db.instance_destroy(context.get_admin_context(), self.instance2_id) - for project in self.projects: - self.manager.delete_project(project) - self.manager.delete_user(self.user) - super(NetworkTestCase, self).tearDown() - - def _create_instance(self, project_num, mac=None): - if not mac: - mac = utils.generate_mac() - project = self.projects[project_num] - self.context._project = project - self.context.project_id = project.id - return db.instance_create(self.context, - {'project_id': project.id, - 'mac_address': mac}) - - def _create_address(self, project_num, instance_id=None): - """Create an address in given project num""" - if instance_id is None: - instance_id = self.instance_id - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - return self.network.allocate_fixed_ip(self.context, instance_id) - - def _deallocate_address(self, project_num, address): - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - self.network.deallocate_fixed_ip(self.context, address) - - def _is_allocated_in_project(self, address, project_id): - """Returns true if address is in specified project""" - project_net = db.network_get_by_bridge(context.get_admin_context(), - FLAGS.flat_network_bridge) - network = db.fixed_ip_get_network(context.get_admin_context(), - address) - instance = db.fixed_ip_get_instance(context.get_admin_context(), - address) - # instance exists until release - return instance is not None and network['id'] == project_net['id'] - - def test_private_ipv6(self): - """Make sure ipv6 is OK""" - if FLAGS.use_ipv6: - instance_ref = self._create_instance(0) - 
address = self._create_address(0, instance_ref['id']) - network_ref = db.project_get_network( - context.get_admin_context(), - self.context.project_id) - address_v6 = db.instance_get_fixed_address_v6( - context.get_admin_context(), - instance_ref['id']) - self.assertEqual(instance_ref['mac_address'], - ipv6.to_mac(address_v6)) - instance_ref2 = db.fixed_ip_get_instance_v6( - context.get_admin_context(), - address_v6) - self.assertEqual(instance_ref['id'], instance_ref2['id']) - self.assertEqual(address_v6, - ipv6.to_global(network_ref['cidr_v6'], - instance_ref['mac_address'], - 'test')) - self._deallocate_address(0, address) - db.instance_destroy(context.get_admin_context(), - instance_ref['id']) - - def test_available_ips(self): - """Make sure the number of available ips for the network is correct - - The number of available IP addresses depends on the test - environment's setup. - - Network size is set in test fixture's setUp method. - - There are ips reserved at the bottom and top of the range. - services (network, gateway, CloudPipe, broadcast) - """ - network = db.project_get_network(context.get_admin_context(), - self.projects[0].id) - net_size = flags.FLAGS.network_size - admin_context = context.get_admin_context() - total_ips = (db.network_count_available_ips(admin_context, - network['id']) + - db.network_count_reserved_ips(admin_context, - network['id']) + - db.network_count_allocated_ips(admin_context, - network['id'])) - self.assertEqual(total_ips, net_size) diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py index e69de29bb..6dab802f2 100644 --- a/nova/tests/scheduler/__init__.py +++ b/nova/tests/scheduler/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Openstack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index 9a5318aee..49791053e 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase): for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) - def test_fill_first_cost_fn(self): + def test_compute_fill_first_cost_fn(self): FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn', + 'nova.scheduler.least_cost.compute_fill_first_cost_fn', ] - FLAGS.fill_first_cost_fn_weight = 1 + FLAGS.compute_fill_first_cost_fn_weight = 1 num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) + instance_type = {'memory_mb': 1024} + request_spec = {'instance_type': instance_type} + hosts = self.sched.filter_hosts('compute', request_spec, None) expected = [] for idx, (hostname, caps) in enumerate(hosts): diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 4be59d411..daea826fd 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -268,7 +268,6 @@ class SimpleDriverTestCase(test.TestCase): inst['user_id'] = self.user.id inst['project_id'] = self.project.id inst['instance_type_id'] = '1' - inst['mac_address'] = utils.generate_mac() inst['vcpus'] = 
kwargs.get('vcpus', 1) inst['ami_launch_index'] = 0 inst['availability_zone'] = kwargs.get('availability_zone', None) @@ -1074,7 +1073,7 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) + zone, "servers", "find", name="test").b, 22) self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), @@ -1088,7 +1087,7 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) + zone, "servers", "find", name="test"), None) self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 37c6488cc..d74b71fb6 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -16,6 +16,8 @@ Tests For Zone Aware Scheduler. 
""" +import nova.db + from nova import exception from nova import test from nova.scheduler import driver @@ -55,29 +57,21 @@ def fake_zone_manager_service_states(num_hosts): class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def filter_hosts(self, num, specs): - # NOTE(sirp): this is returning [(hostname, services)] - return self.zone_manager.service_states.items() - - def weigh_hosts(self, num, specs, hosts): - fake_weight = 99 - weighted = [] - for hostname, caps in hosts: - weighted.append(dict(weight=fake_weight, name=hostname)) - return weighted + # No need to stub anything at the moment + pass class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { 'host1': { - 'compute': {'ram': 1000}, + 'compute': {'host_memory_free': 1073741824}, }, 'host2': { - 'compute': {'ram': 2000}, + 'compute': {'host_memory_free': 2147483648}, }, 'host3': { - 'compute': {'ram': 3000}, + 'compute': {'host_memory_free': 3221225472}, }, } @@ -87,7 +81,7 @@ class FakeEmptyZoneManager(zone_manager.ZoneManager): self.service_states = {} -def fake_empty_call_zone_method(context, method, specs): +def fake_empty_call_zone_method(context, method, specs, zones): return [] @@ -106,7 +100,7 @@ def fake_ask_child_zone_to_create_instance(context, zone_info, was_called = True -def fake_provision_resource_locally(context, item, instance_id, kwargs): +def fake_provision_resource_locally(context, build_plan, request_spec, kwargs): global was_called was_called = True @@ -126,21 +120,21 @@ def fake_decrypt_blob_returns_child_info(blob): 'child_blob': True} # values aren't important. Keys are. 
-def fake_call_zone_method(context, method, specs): +def fake_call_zone_method(context, method, specs, zones): return [ - ('zone1', [ + (1, [ dict(weight=1, blob='AAAAAAA'), dict(weight=111, blob='BBBBBBB'), dict(weight=112, blob='CCCCCCC'), dict(weight=113, blob='DDDDDDD'), ]), - ('zone2', [ + (2, [ dict(weight=120, blob='EEEEEEE'), dict(weight=2, blob='FFFFFFF'), dict(weight=122, blob='GGGGGGG'), dict(weight=123, blob='HHHHHHH'), ]), - ('zone3', [ + (3, [ dict(weight=130, blob='IIIIIII'), dict(weight=131, blob='JJJJJJJ'), dict(weight=132, blob='KKKKKKK'), @@ -149,28 +143,67 @@ def fake_call_zone_method(context, method, specs): ] +def fake_zone_get_all(context): + return [ + dict(id=1, api_url='zone1', + username='admin', password='password', + weight_offset=0.0, weight_scale=1.0), + dict(id=2, api_url='zone2', + username='admin', password='password', + weight_offset=1000.0, weight_scale=1.0), + dict(id=3, api_url='zone3', + username='admin', password='password', + weight_offset=0.0, weight_scale=1000.0), + ] + + class ZoneAwareSchedulerTestCase(test.TestCase): """Test case for Zone Aware Scheduler.""" def test_zone_aware_scheduler(self): """ - Create a nested set of FakeZones, ensure that a select call returns the - appropriate build plan. + Create a nested set of FakeZones, try to build multiple instances + and ensure that a select call returns the appropriate build plan. 
""" sched = FakeZoneAwareScheduler() self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) zm = FakeZoneManager() sched.set_zone_manager(zm) fake_context = {} - build_plan = sched.select(fake_context, {}) - - self.assertEqual(15, len(build_plan)) - - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) + build_plan = sched.select(fake_context, + {'instance_type': {'memory_mb': 512}, + 'num_instances': 4}) + + # 4 from local zones, 12 from remotes + self.assertEqual(16, len(build_plan)) + + hostnames = [plan_item['hostname'] + for plan_item in build_plan if 'hostname' in plan_item] + # 4 local hosts + self.assertEqual(4, len(hostnames)) + + def test_adjust_child_weights(self): + """Make sure the weights returned by child zones are + properly adjusted based on the scale/offset in the zone + db entries. + """ + sched = FakeZoneAwareScheduler() + child_results = fake_call_zone_method(None, None, None, None) + zones = fake_zone_get_all(None) + sched._adjust_child_weights(child_results, zones) + scaled = [130000, 131000, 132000, 3000] + for zone, results in child_results: + for item in results: + w = item['weight'] + if zone == 'zone1': # No change + self.assertTrue(w < 1000.0) + if zone == 'zone2': # Offset +1000 + self.assertTrue(w >= 1000.0 and w < 2000) + if zone == 'zone3': # Scale x1000 + self.assertEqual(scaled.pop(0), w) def test_empty_zone_aware_scheduler(self): """ @@ -178,6 +211,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): """ sched = FakeZoneAwareScheduler() self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) zm = FakeEmptyZoneManager() sched.set_zone_manager(zm) @@ -185,8 +219,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): fake_context = {} self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, fake_context, 1, 
- dict(host_filter=None, - request_spec={'instance_type': {}})) + dict(host_filter=None, instance_type={})) def test_schedule_do_not_schedule_with_hint(self): """ diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py index ce826fd5b..877cf4ea1 100644 --- a/nova/tests/test_adminapi.py +++ b/nova/tests/test_adminapi.py @@ -56,7 +56,6 @@ class AdminApiTestCase(test.TestCase): self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, project=self.project) - host = self.network.get_network_host(self.context.elevated()) def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, @@ -75,9 +74,6 @@ class AdminApiTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) def tearDown(self): - network_ref = db.project_get_network(self.context, - self.project.id) - db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) super(AdminApiTestCase, self).tearDown() diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 20b20fcbf..26ac5ff24 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -92,7 +92,9 @@ class XmlConversionTestCase(test.TestCase): conv = ec2utils._try_convert self.assertEqual(conv('None'), None) self.assertEqual(conv('True'), True) + self.assertEqual(conv('true'), True) self.assertEqual(conv('False'), False) + self.assertEqual(conv('false'), False) self.assertEqual(conv('0'), 0) self.assertEqual(conv('42'), 42) self.assertEqual(conv('3.14'), 3.14) @@ -107,6 +109,8 @@ class Ec2utilsTestCase(test.TestCase): def test_ec2_id_to_id(self): self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30) self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29) + self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28) + self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27) def test_bad_ec2_id(self): self.assertRaises(exception.InvalidEc2Id, 
@@ -116,6 +120,72 @@ class Ec2utilsTestCase(test.TestCase): def test_id_to_ec2_id(self): self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e') self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d') + self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c') + self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b') + + def test_dict_from_dotted_str(self): + in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'), + ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'), + ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'), + ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'), + ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'), + ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')] + expected_dict = { + 'block_device_mapping': { + '1': {'device_name': '/dev/sda1', + 'ebs': {'snapshot_id': 'snap-0000001c', + 'volume_size': 80, + 'delete_on_termination': False}}, + '2': {'device_name': '/dev/sdc', + 'virtual_name': 'ephemeral0'}}} + out_dict = ec2utils.dict_from_dotted_str(in_str) + + self.assertDictMatch(out_dict, expected_dict) + + def test_properties_root_defice_name(self): + mappings = [{"device": "/dev/sda1", "virtual": "root"}] + properties0 = {'mappings': mappings} + properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings} + + root_device_name = ec2utils.properties_root_device_name(properties0) + self.assertEqual(root_device_name, '/dev/sda1') + + root_device_name = ec2utils.properties_root_device_name(properties1) + self.assertEqual(root_device_name, '/dev/sdb') + + def test_mapping_prepend_dev(self): + mappings = [ + {'virtual': 'ami', + 'device': 'sda1'}, + {'virtual': 'root', + 'device': '/dev/sda1'}, + + {'virtual': 'swap', + 'device': 'sdb1'}, + {'virtual': 'swap', + 'device': '/dev/sdb2'}, + + {'virtual': 'ephemeral0', + 'device': 'sdc1'}, + {'virtual': 'ephemeral1', + 'device': '/dev/sdc1'}] + expected_result = [ + {'virtual': 'ami', + 'device': 'sda1'}, + {'virtual': 'root', + 'device': '/dev/sda1'}, 
+ + {'virtual': 'swap', + 'device': '/dev/sdb1'}, + {'virtual': 'swap', + 'device': '/dev/sdb2'}, + + {'virtual': 'ephemeral0', + 'device': '/dev/sdc1'}, + {'virtual': 'ephemeral1', + 'device': '/dev/sdc1'}] + self.assertDictListMatch(ec2utils.mappings_prepend_dev(mappings), + expected_result) class ApiEc2TestCase(test.TestCase): diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py new file mode 100644 index 000000000..b258f6a75 --- /dev/null +++ b/nova/tests/test_bdm.py @@ -0,0 +1,233 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for Block Device Mapping Code. 
+""" + +from nova.api.ec2 import cloud +from nova import test + + +class BlockDeviceMappingEc2CloudTestCase(test.TestCase): + """Test Case for Block Device Mapping""" + + def setUp(self): + super(BlockDeviceMappingEc2CloudTestCase, self).setUp() + + def tearDown(self): + super(BlockDeviceMappingEc2CloudTestCase, self).tearDown() + + def _assertApply(self, action, bdm_list): + for bdm, expected_result in bdm_list: + self.assertDictMatch(action(bdm), expected_result) + + def test_parse_block_device_mapping(self): + bdm_list = [ + ({'device_name': '/dev/fake0', + 'ebs': {'snapshot_id': 'snap-12345678', + 'volume_size': 1}}, + {'device_name': '/dev/fake0', + 'snapshot_id': 0x12345678, + 'volume_size': 1, + 'delete_on_termination': True}), + + ({'device_name': '/dev/fake1', + 'ebs': {'snapshot_id': 'snap-23456789', + 'delete_on_termination': False}}, + {'device_name': '/dev/fake1', + 'snapshot_id': 0x23456789, + 'delete_on_termination': False}), + + ({'device_name': '/dev/fake2', + 'ebs': {'snapshot_id': 'vol-87654321', + 'volume_size': 2}}, + {'device_name': '/dev/fake2', + 'volume_id': 0x87654321, + 'volume_size': 2, + 'delete_on_termination': True}), + + ({'device_name': '/dev/fake3', + 'ebs': {'snapshot_id': 'vol-98765432', + 'delete_on_termination': False}}, + {'device_name': '/dev/fake3', + 'volume_id': 0x98765432, + 'delete_on_termination': False}), + + ({'device_name': '/dev/fake4', + 'ebs': {'no_device': True}}, + {'device_name': '/dev/fake4', + 'no_device': True}), + + ({'device_name': '/dev/fake5', + 'virtual_name': 'ephemeral0'}, + {'device_name': '/dev/fake5', + 'virtual_name': 'ephemeral0'}), + + ({'device_name': '/dev/fake6', + 'virtual_name': 'swap'}, + {'device_name': '/dev/fake6', + 'virtual_name': 'swap'}), + ] + self._assertApply(cloud._parse_block_device_mapping, bdm_list) + + def test_format_block_device_mapping(self): + bdm_list = [ + ({'device_name': '/dev/fake0', + 'snapshot_id': 0x12345678, + 'volume_size': 1, + 'delete_on_termination': True}, 
+ {'deviceName': '/dev/fake0', + 'ebs': {'snapshotId': 'snap-12345678', + 'volumeSize': 1, + 'deleteOnTermination': True}}), + + ({'device_name': '/dev/fake1', + 'snapshot_id': 0x23456789}, + {'deviceName': '/dev/fake1', + 'ebs': {'snapshotId': 'snap-23456789'}}), + + ({'device_name': '/dev/fake2', + 'snapshot_id': 0x23456789, + 'delete_on_termination': False}, + {'deviceName': '/dev/fake2', + 'ebs': {'snapshotId': 'snap-23456789', + 'deleteOnTermination': False}}), + + ({'device_name': '/dev/fake3', + 'volume_id': 0x12345678, + 'volume_size': 1, + 'delete_on_termination': True}, + {'deviceName': '/dev/fake3', + 'ebs': {'snapshotId': 'vol-12345678', + 'volumeSize': 1, + 'deleteOnTermination': True}}), + + ({'device_name': '/dev/fake4', + 'volume_id': 0x23456789}, + {'deviceName': '/dev/fake4', + 'ebs': {'snapshotId': 'vol-23456789'}}), + + ({'device_name': '/dev/fake5', + 'volume_id': 0x23456789, + 'delete_on_termination': False}, + {'deviceName': '/dev/fake5', + 'ebs': {'snapshotId': 'vol-23456789', + 'deleteOnTermination': False}}), + ] + self._assertApply(cloud._format_block_device_mapping, bdm_list) + + def test_format_mapping(self): + properties = { + 'mappings': [ + {'virtual': 'ami', + 'device': 'sda1'}, + {'virtual': 'root', + 'device': '/dev/sda1'}, + + {'virtual': 'swap', + 'device': 'sdb1'}, + {'virtual': 'swap', + 'device': 'sdb2'}, + {'virtual': 'swap', + 'device': 'sdb3'}, + {'virtual': 'swap', + 'device': 'sdb4'}, + + {'virtual': 'ephemeral0', + 'device': 'sdc1'}, + {'virtual': 'ephemeral1', + 'device': 'sdc2'}, + {'virtual': 'ephemeral2', + 'device': 'sdc3'}, + ], + + 'block_device_mapping': [ + # root + {'device_name': '/dev/sda1', + 'snapshot_id': 0x12345678, + 'delete_on_termination': False}, + + + # overwrite swap + {'device_name': '/dev/sdb2', + 'snapshot_id': 0x23456789, + 'delete_on_termination': False}, + {'device_name': '/dev/sdb3', + 'snapshot_id': 0x3456789A}, + {'device_name': '/dev/sdb4', + 'no_device': True}, + + # overwrite ephemeral 
+ {'device_name': '/dev/sdc2', + 'snapshot_id': 0x3456789A, + 'delete_on_termination': False}, + {'device_name': '/dev/sdc3', + 'snapshot_id': 0x456789AB}, + {'device_name': '/dev/sdc4', + 'no_device': True}, + + # volume + {'device_name': '/dev/sdd1', + 'snapshot_id': 0x87654321, + 'delete_on_termination': False}, + {'device_name': '/dev/sdd2', + 'snapshot_id': 0x98765432}, + {'device_name': '/dev/sdd3', + 'snapshot_id': 0xA9875463}, + {'device_name': '/dev/sdd4', + 'no_device': True}]} + + expected_result = { + 'blockDeviceMapping': [ + # root + {'deviceName': '/dev/sda1', + 'ebs': {'snapshotId': 'snap-12345678', + 'deleteOnTermination': False}}, + + # swap + {'deviceName': '/dev/sdb1', + 'virtualName': 'swap'}, + {'deviceName': '/dev/sdb2', + 'ebs': {'snapshotId': 'snap-23456789', + 'deleteOnTermination': False}}, + {'deviceName': '/dev/sdb3', + 'ebs': {'snapshotId': 'snap-3456789a'}}, + + # ephemeral + {'deviceName': '/dev/sdc1', + 'virtualName': 'ephemeral0'}, + {'deviceName': '/dev/sdc2', + 'ebs': {'snapshotId': 'snap-3456789a', + 'deleteOnTermination': False}}, + {'deviceName': '/dev/sdc3', + 'ebs': {'snapshotId': 'snap-456789ab'}}, + + # volume + {'deviceName': '/dev/sdd1', + 'ebs': {'snapshotId': 'snap-87654321', + 'deleteOnTermination': False}}, + {'deviceName': '/dev/sdd2', + 'ebs': {'snapshotId': 'snap-98765432'}}, + {'deviceName': '/dev/sdd3', + 'ebs': {'snapshotId': 'snap-a9875463'}}]} + + result = {} + cloud._format_mappings(properties, result) + print result + self.assertEqual(result['blockDeviceMapping'].sort(), + expected_result['blockDeviceMapping'].sort()) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 6327734f5..136082cc1 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -15,6 +15,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import mox from base64 import b64decode from M2Crypto import BIO @@ -29,6 +30,7 @@ from nova import db from nova import exception from nova import flags from nova import log as logging +from nova import network from nova import rpc from nova import test from nova import utils @@ -45,7 +47,8 @@ LOG = logging.getLogger('nova.tests.cloud') class CloudTestCase(test.TestCase): def setUp(self): super(CloudTestCase, self).setUp() - self.flags(connection_type='fake') + self.flags(connection_type='fake', + stub_network=True) self.conn = rpc.Connection.instance() @@ -64,10 +67,11 @@ class CloudTestCase(test.TestCase): self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, project=self.project) - host = self.network.get_network_host(self.context.elevated()) + host = self.network.host def fake_show(meh, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine', 'image_state': 'available'}} self.stubs.Set(fake._FakeImageService, 'show', fake_show) @@ -83,9 +87,10 @@ class CloudTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) def tearDown(self): - network_ref = db.project_get_network(self.context, - self.project.id) - db.network_disassociate(self.context, network_ref['id']) + networks = db.project_get_networks(self.context, self.project.id, + associate=False) + for network in networks: + db.network_disassociate(self.context, network['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) super(CloudTestCase, self).tearDown() @@ -116,6 +121,7 @@ class CloudTestCase(test.TestCase): public_ip=address) db.floating_ip_destroy(self.context, address) + @test.skip_test("Skipping this pending future merge") def test_allocate_address(self): address = "10.10.10.10" allocate = self.cloud.allocate_address @@ -128,6 +134,34 @@ class 
CloudTestCase(test.TestCase): allocate, self.context) + def test_release_address(self): + address = "10.10.10.10" + allocate = self.cloud.allocate_address + db.floating_ip_create(self.context, + {'address': address, + 'host': self.network.host}) + result = self.cloud.release_address(self.context, address) + self.assertEqual(result['releaseResponse'], ['Address released.']) + + def test_release_address_still_associated(self): + address = "10.10.10.10" + fixed_ip = {'instance': {'id': 1}} + floating_ip = {'id': 0, + 'address': address, + 'fixed_ip_id': 0, + 'fixed_ip': fixed_ip, + 'project_id': None, + 'auto_assigned': False} + network_api = network.api.API() + self.mox.StubOutWithMock(network_api.db, 'floating_ip_get_by_address') + network_api.db.floating_ip_get_by_address(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(floating_ip) + self.mox.ReplayAll() + release = self.cloud.release_address + # ApiError: Floating ip is in use. Disassociate it before releasing. + self.assertRaises(exception.ApiError, release, self.context, address) + + @test.skip_test("Skipping this pending future merge") def test_associate_disassociate_address(self): """Verifies associate runs cleanly without raising an exception""" address = "10.10.10.10" @@ -135,8 +169,27 @@ class CloudTestCase(test.TestCase): {'address': address, 'host': self.network.host}) self.cloud.allocate_address(self.context) - inst = db.instance_create(self.context, {'host': self.compute.host}) - fixed = self.network.allocate_fixed_ip(self.context, inst['id']) + # TODO(jkoelker) Probably need to query for instance_type_id and + # make sure we get a valid one + inst = db.instance_create(self.context, {'host': self.compute.host, + 'instance_type_id': 1}) + networks = db.network_get_all(self.context) + for network in networks: + self.network.set_network_host(self.context, network['id']) + project_id = self.context.project_id + type_id = inst['instance_type_id'] + ips = self.network.allocate_for_instance(self.context, + 
instance_id=inst['id'], + instance_type_id=type_id, + project_id=project_id) + # TODO(jkoelker) Make this mas bueno + self.assertTrue(ips) + self.assertTrue('ips' in ips[0][1]) + self.assertTrue(ips[0][1]['ips']) + self.assertTrue('ip' in ips[0][1]['ips'][0]) + + fixed = ips[0][1]['ips'][0]['ip'] + ec2_id = ec2utils.id_to_ec2_id(inst['id']) self.cloud.associate_address(self.context, instance_id=ec2_id, @@ -165,13 +218,148 @@ class CloudTestCase(test.TestCase): sec['name']) db.security_group_destroy(self.context, sec['id']) + def test_describe_security_groups_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + result = self.cloud.describe_security_groups(self.context, + group_id=[sec['id']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + sec['name']) + default = db.security_group_get_by_name(self.context, + self.context.project_id, + 'default') + result = self.cloud.describe_security_groups(self.context, + group_id=[default['id']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + 'default') + db.security_group_destroy(self.context, sec['id']) + + def test_create_delete_security_group(self): + descript = 'test description' + create = self.cloud.create_security_group + result = create(self.context, 'testgrp', descript) + group_descript = result['securityGroupSet'][0]['groupDescription'] + self.assertEqual(descript, group_descript) + delete = self.cloud.delete_security_group + self.assertTrue(delete(self.context, 'testgrp')) + + def test_delete_security_group_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + delete = self.cloud.delete_security_group + self.assertTrue(delete(self.context, group_id=sec['id'])) + + def test_delete_security_group_with_bad_name(self): + delete = 
self.cloud.delete_security_group + notfound = exception.SecurityGroupNotFound + self.assertRaises(notfound, delete, self.context, 'badname') + + def test_delete_security_group_with_bad_group_id(self): + delete = self.cloud.delete_security_group + notfound = exception.SecurityGroupNotFound + self.assertRaises(notfound, delete, self.context, group_id=999) + + def test_delete_security_group_no_params(self): + delete = self.cloud.delete_security_group + self.assertRaises(exception.ApiError, delete, self.context) + + def test_authorize_security_group_ingress(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, + 'ip_ranges': + {'1': {'cidr_ip': u'0.0.0.0/0'}, + '2': {'cidr_ip': u'10.10.10.10/32'}}, + 'ip_protocol': u'tcp'}]} + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_security_group_ingress_ip_permissions_groups(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, + 'ip_ranges':{'1': {'cidr_ip': u'0.0.0.0/0'}, + '2': {'cidr_ip': u'10.10.10.10/32'}}, + 'groups': {'1': {'user_id': u'someuser', + 'group_name': u'somegroup1'}, + '2': {'user_id': u'someuser', + 'group_name': u'othergroup2'}}, + 'ip_protocol': u'tcp'}]} + 
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_revoke_security_group_ingress(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_id=sec['id'], **kwargs) + revoke = self.cloud.revoke_security_group_ingress + self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs)) + + def test_revoke_security_group_ingress_by_id(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_id=sec['id'], **kwargs) + revoke = self.cloud.revoke_security_group_ingress + self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs)) + + def test_authorize_security_group_ingress_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + self.assertTrue(authz(self.context, group_id=sec['id'], **kwargs)) + + def test_authorize_security_group_ingress_missing_protocol_params(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + authz = self.cloud.authorize_security_group_ingress + self.assertRaises(exception.ApiError, authz, self.context, 'test') + + def test_authorize_security_group_ingress_missing_group_name_or_id(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + authz = self.cloud.authorize_security_group_ingress + self.assertRaises(exception.ApiError, authz, self.context, **kwargs) + + def 
test_authorize_security_group_ingress_already_exists(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_name=sec['name'], **kwargs) + self.assertRaises(exception.ApiError, authz, self.context, + group_name=sec['name'], **kwargs) + + def test_revoke_security_group_ingress_missing_group_name_or_id(self): + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + revoke = self.cloud.revoke_security_group_ingress + self.assertRaises(exception.ApiError, revoke, self.context, **kwargs) + def test_describe_volumes(self): """Makes sure describe_volumes works and filters results.""" vol1 = db.volume_create(self.context, {}) vol2 = db.volume_create(self.context, {}) result = self.cloud.describe_volumes(self.context) self.assertEqual(len(result['volumeSet']), 2) - volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x') + volume_id = ec2utils.id_to_ec2_vol_id(vol2['id']) result = self.cloud.describe_volumes(self.context, volume_id=[volume_id]) self.assertEqual(len(result['volumeSet']), 1) @@ -187,7 +375,7 @@ class CloudTestCase(test.TestCase): snap = db.snapshot_create(self.context, {'volume_id': vol['id'], 'volume_size': vol['size'], 'status': "available"}) - snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id']) result = self.cloud.create_volume(self.context, snapshot_id=snapshot_id) @@ -217,6 +405,8 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, service1['id']) db.service_destroy(self.context, service2['id']) + # NOTE(jkoelker): this test relies on fixed_ip being in instances + @test.skip_test("EC2 stuff needs fixed_ip in instance_ref") def test_describe_snapshots(self): """Makes sure describe_snapshots works and filters results.""" vol = 
db.volume_create(self.context, {}) @@ -224,7 +414,7 @@ class CloudTestCase(test.TestCase): snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) result = self.cloud.describe_snapshots(self.context) self.assertEqual(len(result['snapshotSet']), 2) - snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x') + snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id']) result = self.cloud.describe_snapshots(self.context, snapshot_id=[snapshot_id]) self.assertEqual(len(result['snapshotSet']), 1) @@ -238,7 +428,7 @@ class CloudTestCase(test.TestCase): def test_create_snapshot(self): """Makes sure create_snapshot works.""" vol = db.volume_create(self.context, {'status': "available"}) - volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) result = self.cloud.create_snapshot(self.context, volume_id=volume_id) @@ -255,7 +445,7 @@ class CloudTestCase(test.TestCase): vol = db.volume_create(self.context, {'status': "available"}) snap = db.snapshot_create(self.context, {'volume_id': vol['id'], 'status': "available"}) - snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id']) result = self.cloud.delete_snapshot(self.context, snapshot_id=snapshot_id) @@ -294,11 +484,191 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, comp1['id']) db.service_destroy(self.context, comp2['id']) + def _block_device_mapping_create(self, instance_id, mappings): + volumes = [] + for bdm in mappings: + db.block_device_mapping_create(self.context, bdm) + if 'volume_id' in bdm: + values = {'id': bdm['volume_id']} + for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'), + ('snapshot_size', 'volume_size'), + ('delete_on_termination', + 'delete_on_termination')]: + if bdm_key in bdm: + values[vol_key] = bdm[bdm_key] + vol = db.volume_create(self.context, values) + db.volume_attached(self.context, vol['id'], + instance_id, bdm['device_name']) + 
volumes.append(vol) + return volumes + + def _setUpBlockDeviceMapping(self): + inst1 = db.instance_create(self.context, + {'image_ref': 1, + 'root_device_name': '/dev/sdb1'}) + inst2 = db.instance_create(self.context, + {'image_ref': 2, + 'root_device_name': '/dev/sdc1'}) + + instance_id = inst1['id'] + mappings0 = [ + {'instance_id': instance_id, + 'device_name': '/dev/sdb1', + 'snapshot_id': '1', + 'volume_id': '2'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb2', + 'volume_id': '3', + 'volume_size': 1}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb3', + 'delete_on_termination': True, + 'snapshot_id': '4', + 'volume_id': '5'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb4', + 'delete_on_termination': False, + 'snapshot_id': '6', + 'volume_id': '7'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb5', + 'snapshot_id': '8', + 'volume_id': '9', + 'volume_size': 0}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb6', + 'snapshot_id': '10', + 'volume_id': '11', + 'volume_size': 1}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb7', + 'no_device': True}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb8', + 'virtual_name': 'swap'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb9', + 'virtual_name': 'ephemeral3'}] + + volumes = self._block_device_mapping_create(instance_id, mappings0) + return (inst1, inst2, volumes) + + def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes): + for vol in volumes: + db.volume_destroy(self.context, vol['id']) + for id in (inst1['id'], inst2['id']): + for bdm in db.block_device_mapping_get_all_by_instance( + self.context, id): + db.block_device_mapping_destroy(self.context, bdm['id']) + db.instance_destroy(self.context, inst2['id']) + db.instance_destroy(self.context, inst1['id']) + + _expected_instance_bdm1 = { + 'instanceId': 'i-00000001', + 'rootDeviceName': '/dev/sdb1', + 'rootDeviceType': 'ebs'} + + _expected_block_device_mapping0 = [ + 
{'deviceName': '/dev/sdb1', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 2, + }}, + {'deviceName': '/dev/sdb2', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 3, + }}, + {'deviceName': '/dev/sdb3', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': True, + 'volumeId': 5, + }}, + {'deviceName': '/dev/sdb4', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 7, + }}, + {'deviceName': '/dev/sdb5', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 9, + }}, + {'deviceName': '/dev/sdb6', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 11, }}] + # NOTE(yamahata): swap/ephemeral device case isn't supported yet. + + _expected_instance_bdm2 = { + 'instanceId': 'i-00000002', + 'rootDeviceName': '/dev/sdc1', + 'rootDeviceType': 'instance-store'} + + def test_format_instance_bdm(self): + (inst1, inst2, volumes) = self._setUpBlockDeviceMapping() + + result = {} + self.cloud._format_instance_bdm(self.context, inst1['id'], '/dev/sdb1', + result) + self.assertSubDictMatch( + {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']}, + result) + self._assertEqualBlockDeviceMapping( + self._expected_block_device_mapping0, result['blockDeviceMapping']) + + result = {} + self.cloud._format_instance_bdm(self.context, inst2['id'], '/dev/sdc1', + result) + self.assertSubDictMatch( + {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']}, + result) + + self._tearDownBlockDeviceMapping(inst1, inst2, volumes) + + def _assertInstance(self, instance_id): + ec2_instance_id = ec2utils.id_to_ec2_id(instance_id) + result = self.cloud.describe_instances(self.context, + instance_id=[ec2_instance_id]) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 1) + result = result['instancesSet'][0] + self.assertEqual(result['instanceId'], ec2_instance_id) + return result + + def 
_assertEqualBlockDeviceMapping(self, expected, result): + self.assertEqual(len(expected), len(result)) + for x in expected: + found = False + for y in result: + if x['deviceName'] == y['deviceName']: + self.assertSubDictMatch(x, y) + found = True + break + self.assertTrue(found) + + def test_describe_instances_bdm(self): + """Make sure describe_instances works with root_device_name and + block device mappings + """ + (inst1, inst2, volumes) = self._setUpBlockDeviceMapping() + + result = self._assertInstance(inst1['id']) + self.assertSubDictMatch(self._expected_instance_bdm1, result) + self._assertEqualBlockDeviceMapping( + self._expected_block_device_mapping0, result['blockDeviceMapping']) + + result = self._assertInstance(inst2['id']) + self.assertSubDictMatch(self._expected_instance_bdm2, result) + + self._tearDownBlockDeviceMapping(inst1, inst2, volumes) + def test_describe_images(self): describe_images = self.cloud.describe_images def fake_detail(meh, context): - return [{'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return [{'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}}] def fake_show_none(meh, context, id): @@ -323,12 +693,168 @@ class CloudTestCase(test.TestCase): self.assertRaises(exception.ImageNotFound, describe_images, self.context, ['ami-fake']) + def assertDictListUnorderedMatch(self, L1, L2, key): + self.assertEqual(len(L1), len(L2)) + for d1 in L1: + self.assertTrue(key in d1) + for d2 in L2: + self.assertTrue(key in d2) + if d1[key] == d2[key]: + self.assertDictMatch(d1, d2) + + def _setUpImageSet(self, create_volumes_and_snapshots=False): + mappings1 = [ + {'device': '/dev/sda1', 'virtual': 'root'}, + + {'device': 'sdb0', 'virtual': 'ephemeral0'}, + {'device': 'sdb1', 'virtual': 'ephemeral1'}, + {'device': 'sdb2', 'virtual': 'ephemeral2'}, + {'device': 'sdb3', 'virtual': 'ephemeral3'}, + {'device': 'sdb4', 'virtual': 'ephemeral4'}, + + {'device': 'sdc0', 'virtual': 'swap'}, + 
{'device': 'sdc1', 'virtual': 'swap'}, + {'device': 'sdc2', 'virtual': 'swap'}, + {'device': 'sdc3', 'virtual': 'swap'}, + {'device': 'sdc4', 'virtual': 'swap'}] + block_device_mapping1 = [ + {'device_name': '/dev/sdb1', 'snapshot_id': 01234567}, + {'device_name': '/dev/sdb2', 'volume_id': 01234567}, + {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'}, + {'device_name': '/dev/sdb4', 'no_device': True}, + + {'device_name': '/dev/sdc1', 'snapshot_id': 12345678}, + {'device_name': '/dev/sdc2', 'volume_id': 12345678}, + {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'}, + {'device_name': '/dev/sdc4', 'no_device': True}] + image1 = { + 'id': 1, + 'properties': { + 'kernel_id': 1, + 'type': 'machine', + 'image_state': 'available', + 'mappings': mappings1, + 'block_device_mapping': block_device_mapping1, + } + } + + mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}] + block_device_mapping2 = [{'device_name': '/dev/sdb1', + 'snapshot_id': 01234567}] + image2 = { + 'id': 2, + 'properties': { + 'kernel_id': 2, + 'type': 'machine', + 'root_device_name': '/dev/sdb1', + 'mappings': mappings2, + 'block_device_mapping': block_device_mapping2}} + + def fake_show(meh, context, image_id): + for i in [image1, image2]: + if i['id'] == image_id: + return i + raise exception.ImageNotFound(image_id=image_id) + + def fake_detail(meh, context): + return [image1, image2] + + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) + + volumes = [] + snapshots = [] + if create_volumes_and_snapshots: + for bdm in block_device_mapping1: + if 'volume_id' in bdm: + vol = self._volume_create(bdm['volume_id']) + volumes.append(vol['id']) + if 'snapshot_id' in bdm: + snap = db.snapshot_create(self.context, + {'id': bdm['snapshot_id'], + 'volume_id': 76543210, + 'status': "available", + 'volume_size': 1}) + snapshots.append(snap['id']) + return (volumes, snapshots) + + def _assertImageSet(self, result, 
root_device_type, root_device_name): + self.assertEqual(1, len(result['imagesSet'])) + result = result['imagesSet'][0] + self.assertTrue('rootDeviceType' in result) + self.assertEqual(result['rootDeviceType'], root_device_type) + self.assertTrue('rootDeviceName' in result) + self.assertEqual(result['rootDeviceName'], root_device_name) + self.assertTrue('blockDeviceMapping' in result) + + return result + + _expected_root_device_name1 = '/dev/sda1' + # NOTE(yamahata): noDevice doesn't make sense when returning mapping + # It makes sense only when user overriding existing + # mapping. + _expected_bdms1 = [ + {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'}, + {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId': + 'snap-00053977'}}, + {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId': + 'vol-00053977'}}, + {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'}, + # {'deviceName': '/dev/sdb4', 'noDevice': True}, + + {'deviceName': '/dev/sdc0', 'virtualName': 'swap'}, + {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId': + 'snap-00bc614e'}}, + {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId': + 'vol-00bc614e'}}, + {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'}, + # {'deviceName': '/dev/sdc4', 'noDevice': True} + ] + + _expected_root_device_name2 = '/dev/sdb1' + _expected_bdms2 = [{'deviceName': '/dev/sdb1', + 'ebs': {'snapshotId': 'snap-00053977'}}] + + # NOTE(yamahata): + # InstanceBlockDeviceMappingItemType + # rootDeviceType + # rootDeviceName + # blockDeviceMapping + # deviceName + # virtualName + # ebs + # snapshotId + # volumeSize + # deleteOnTermination + # noDevice + def test_describe_image_mapping(self): + """test for rootDeviceName and blockDeiceMapping""" + describe_images = self.cloud.describe_images + self._setUpImageSet() + + result = describe_images(self.context, ['ami-00000001']) + result = self._assertImageSet(result, 'instance-store', + self._expected_root_device_name1) + + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + 
self._expected_bdms1, 'deviceName') + + result = describe_images(self.context, ['ami-00000002']) + result = self._assertImageSet(result, 'ebs', + self._expected_root_device_name2) + + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + self._expected_bdms2, 'deviceName') + + self.stubs.UnsetAll() + def test_describe_image_attribute(self): describe_image_attribute = self.cloud.describe_image_attribute def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'is_public': True} + 'type': 'machine'}, 'container_format': 'ami', + 'is_public': True} self.stubs.Set(fake._FakeImageService, 'show', fake_show) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) @@ -336,11 +862,38 @@ class CloudTestCase(test.TestCase): 'launchPermission') self.assertEqual([{'group': 'all'}], result['launchPermission']) + def test_describe_image_attribute_root_device_name(self): + describe_image_attribute = self.cloud.describe_image_attribute + self._setUpImageSet() + + result = describe_image_attribute(self.context, 'ami-00000001', + 'rootDeviceName') + self.assertEqual(result['rootDeviceName'], + self._expected_root_device_name1) + result = describe_image_attribute(self.context, 'ami-00000002', + 'rootDeviceName') + self.assertEqual(result['rootDeviceName'], + self._expected_root_device_name2) + + def test_describe_image_attribute_block_device_mapping(self): + describe_image_attribute = self.cloud.describe_image_attribute + self._setUpImageSet() + + result = describe_image_attribute(self.context, 'ami-00000001', + 'blockDeviceMapping') + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + self._expected_bdms1, 'deviceName') + result = describe_image_attribute(self.context, 'ami-00000002', + 'blockDeviceMapping') + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + self._expected_bdms2, 'deviceName') + def test_modify_image_attribute(self): modify_image_attribute = 
self.cloud.modify_image_attribute def fake_show(meh, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}, 'is_public': False} def fake_update(meh, context, image_id, metadata, data=None): @@ -374,6 +927,16 @@ class CloudTestCase(test.TestCase): self.assertRaises(exception.ImageNotFound, deregister_image, self.context, 'ami-bad001') + def test_deregister_image_wrong_container_type(self): + deregister_image = self.cloud.deregister_image + + def fake_delete(self, context, id): + return None + + self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) + self.assertRaises(exception.NotFound, deregister_image, self.context, + 'aki-00000001') + def _run_instance(self, **kwargs): rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] @@ -384,6 +947,21 @@ class CloudTestCase(test.TestCase): self._wait_for_running(ec2_instance_id) return ec2_instance_id + def test_rescue_unrescue_instance(self): + instance_id = self._run_instance( + image_id='ami-1', + instance_type=FLAGS.default_instance_type, + max_count=1) + self.cloud.rescue_instance(context=self.context, + instance_id=instance_id) + # NOTE(vish): This currently does no validation, it simply makes sure + # that the code path doesn't throw an exception. + self.cloud.unrescue_instance(context=self.context, + instance_id=instance_id) + # TODO(soren): We need this until we can stop polling in the rpc code + # for unit tests. 
+ self.cloud.terminate_instances(self.context, [instance_id]) + def test_console_output(self): instance_id = self._run_instance( image_id='ami-1', @@ -489,7 +1067,7 @@ class CloudTestCase(test.TestCase): def fake_show_no_state(self, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}} + 'type': 'machine'}, 'container_format': 'ami'} self.stubs.UnsetAll() self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state) @@ -503,7 +1081,8 @@ class CloudTestCase(test.TestCase): run_instances = self.cloud.run_instances def fake_show_decrypt(self, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine', 'image_state': 'decrypting'}} self.stubs.UnsetAll() @@ -518,7 +1097,8 @@ class CloudTestCase(test.TestCase): run_instances = self.cloud.run_instances def fake_show_stat_active(self, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}, 'status': 'active'} self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active) @@ -548,6 +1128,8 @@ class CloudTestCase(test.TestCase): self.assertEqual('c00l 1m4g3', inst['display_name']) db.instance_destroy(self.context, inst['id']) + # NOTE(jkoelker): This test relies on mac_address in instance + @test.skip_test("EC2 stuff needs mac_address in instance_ref") def test_update_of_instance_wont_update_private_fields(self): inst = db.instance_create(self.context, {}) ec2_id = ec2utils.id_to_ec2_id(inst['id']) @@ -561,7 +1143,7 @@ class CloudTestCase(test.TestCase): def test_update_of_volume_display_fields(self): vol = db.volume_create(self.context, {}) self.cloud.update_volume(self.context, - ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'), + ec2utils.id_to_ec2_vol_id(vol['id']), display_name='c00l v0lum3') 
vol = db.volume_get(self.context, vol['id']) self.assertEqual('c00l v0lum3', vol['display_name']) @@ -570,7 +1152,7 @@ class CloudTestCase(test.TestCase): def test_update_of_volume_wont_update_private_fields(self): vol = db.volume_create(self.context, {}) self.cloud.update_volume(self.context, - ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'), + ec2utils.id_to_ec2_vol_id(vol['id']), mountpoint='/not/here') vol = db.volume_get(self.context, vol['id']) self.assertEqual(None, vol['mountpoint']) @@ -611,6 +1193,7 @@ class CloudTestCase(test.TestCase): elevated = self.context.elevated(read_deleted=True) self._wait_for_state(elevated, instance_id, is_deleted) + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") def test_stop_start_instance(self): """Makes sure stop/start instance works""" # enforce periodic tasks run in short time to avoid wait for 60s. @@ -647,11 +1230,13 @@ class CloudTestCase(test.TestCase): self._restart_compute_service() - def _volume_create(self): + def _volume_create(self, volume_id=None): kwargs = {'status': 'available', 'host': self.volume.host, 'size': 1, 'attach_status': 'detached', } + if volume_id: + kwargs['id'] = volume_id return db.volume_create(self.context, kwargs) def _assert_volume_attached(self, vol, instance_id, mountpoint): @@ -666,6 +1251,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(vol['status'], "available") self.assertEqual(vol['attach_status'], "detached") + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") def test_stop_start_with_volume(self): """Make sure run instance with block device mapping works""" @@ -679,10 +1265,10 @@ class CloudTestCase(test.TestCase): 'max_count': 1, 'block_device_mapping': [{'device_name': '/dev/vdb', 'volume_id': vol1['id'], - 'delete_on_termination': False, }, + 'delete_on_termination': False}, {'device_name': '/dev/vdc', 'volume_id': vol2['id'], - 'delete_on_termination': True, }, + 'delete_on_termination': True}, ]} ec2_instance_id = 
self._run_instance_wait(**kwargs) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) @@ -734,6 +1320,7 @@ class CloudTestCase(test.TestCase): self._restart_compute_service() + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") def test_stop_with_attached_volume(self): """Make sure attach info is reflected to block device mapping""" # enforce periodic tasks run in short time to avoid wait for 60s. @@ -809,10 +1396,11 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) return result['snapshotId'] + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") def test_run_with_snapshot(self): """Makes sure run/stop/start instance with snapshot works.""" vol = self._volume_create() - ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + ec2_volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) ec2_snapshot1_id = self._create_snapshot(ec2_volume_id) snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id) @@ -871,3 +1459,33 @@ class CloudTestCase(test.TestCase): self.cloud.delete_snapshot(self.context, snapshot_id) greenthread.sleep(0.3) db.volume_destroy(self.context, vol['id']) + + def test_create_image(self): + """Make sure that CreateImage works""" + # enforce periodic tasks run in short time to avoid wait for 60s. 
+ self._restart_compute_service(periodic_interval=0.3) + + (volumes, snapshots) = self._setUpImageSet( + create_volumes_and_snapshots=True) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + ec2_instance_id = self._run_instance_wait(**kwargs) + + # TODO(yamahata): s3._s3_create() can't be tested easily by unit test + # as there is no unit test for s3.create() + ## result = self.cloud.create_image(self.context, ec2_instance_id, + ## no_reboot=True) + ## ec2_image_id = result['imageId'] + ## created_image = self.cloud.describe_images(self.context, + ## [ec2_image_id]) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + for vol in volumes: + db.volume_destroy(self.context, vol) + for snap in snapshots: + db.snapshot_destroy(self.context, snap) + # TODO(yamahata): clean up snapshot created by CreateImage. + + self._restart_compute_service() diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 439508b27..2a8f33dd3 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -37,6 +37,7 @@ from nova import log as logging from nova import rpc from nova import test from nova import utils +from nova.notifier import test_notifier LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS @@ -62,6 +63,7 @@ class ComputeTestCase(test.TestCase): super(ComputeTestCase, self).setUp() self.flags(connection_type='fake', stub_network=True, + notification_driver='nova.notifier.test_notifier', network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) self.compute_api = compute.API() @@ -69,6 +71,7 @@ class ComputeTestCase(test.TestCase): self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') self.context = context.RequestContext('fake', 'fake', False) + test_notifier.NOTIFICATIONS = [] def fake_show(meh, context, id): return {'id': 1, 'properties': 
{'kernel_id': 1, 'ramdisk_id': 1}} @@ -90,7 +93,6 @@ class ComputeTestCase(test.TestCase): inst['project_id'] = self.project.id type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] inst['instance_type_id'] = type_id - inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 inst.update(params) return db.instance_create(self.context, inst)['id'] @@ -128,7 +130,7 @@ class ComputeTestCase(test.TestCase): instance_ref = models.Instance() instance_ref['id'] = 1 instance_ref['volumes'] = [vol1, vol2] - instance_ref['hostname'] = 'i-00000001' + instance_ref['hostname'] = 'hostname-1' instance_ref['host'] = 'dummy' return instance_ref @@ -160,6 +162,18 @@ class ComputeTestCase(test.TestCase): db.security_group_destroy(self.context, group['id']) db.instance_destroy(self.context, ref[0]['id']) + def test_default_hostname_generator(self): + cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'), + ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')] + for display_name, hostname in cases: + ref = self.compute_api.create(self.context, + instance_types.get_default_instance_type(), None, + display_name=display_name) + try: + self.assertEqual(ref[0]['hostname'], hostname) + finally: + db.instance_destroy(self.context, ref[0]['id']) + def test_destroy_instance_disassociates_security_groups(self): """Make sure destroying disassociates security groups""" group = self._create_group() @@ -327,6 +341,50 @@ class ComputeTestCase(test.TestCase): self.assert_(console) self.compute.terminate_instance(self.context, instance_id) + def test_run_instance_usage_notification(self): + """Ensure run instance generates apropriate usage notification""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEquals(msg['priority'], 'INFO') + self.assertEquals(msg['event_type'], 'compute.instance.create') + payload = 
msg['payload'] + self.assertEquals(payload['tenant_id'], self.project.id) + self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['instance_id'], instance_id) + self.assertEquals(payload['instance_type'], 'm1.tiny') + type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] + self.assertEquals(str(payload['instance_type_id']), str(type_id)) + self.assertTrue('display_name' in payload) + self.assertTrue('created_at' in payload) + self.assertTrue('launched_at' in payload) + self.assertEquals(payload['image_ref'], '1') + self.compute.terminate_instance(self.context, instance_id) + + def test_terminate_usage_notification(self): + """Ensure terminate_instance generates apropriate usage notification""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + test_notifier.NOTIFICATIONS = [] + self.compute.terminate_instance(self.context, instance_id) + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEquals(msg['priority'], 'INFO') + self.assertEquals(msg['event_type'], 'compute.instance.delete') + payload = msg['payload'] + self.assertEquals(payload['tenant_id'], self.project.id) + self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['instance_id'], instance_id) + self.assertEquals(payload['instance_type'], 'm1.tiny') + type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] + self.assertEquals(str(payload['instance_type_id']), str(type_id)) + self.assertTrue('display_name' in payload) + self.assertTrue('created_at' in payload) + self.assertTrue('launched_at' in payload) + self.assertEquals(payload['image_ref'], '1') + def test_run_instance_existing(self): """Ensure failure when running an instance that already exists""" instance_id = self._create_instance() @@ -363,13 +421,15 @@ class ComputeTestCase(test.TestCase): pass self.stubs.Set(self.compute.driver, 'finish_resize', fake) + 
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake) context = self.context.elevated() instance_id = self._create_instance() - self.compute.prep_resize(context, instance_id, 1) + instance_ref = db.instance_get(context, instance_id) + self.compute.prep_resize(context, instance_ref['uuid'], 1) migration_ref = db.migration_get_by_instance_and_status(context, - instance_id, 'pre-migrating') + instance_ref['uuid'], 'pre-migrating') try: - self.compute.finish_resize(context, instance_id, + self.compute.finish_resize(context, instance_ref['uuid'], int(migration_ref['id']), {}) except KeyError, e: # Only catch key errors. We want other reasons for the test to @@ -378,17 +438,50 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) + def test_resize_instance_notification(self): + """Ensure notifications on instance migrate/resize""" + instance_id = self._create_instance() + context = self.context.elevated() + inst_ref = db.instance_get(context, instance_id) + + self.compute.run_instance(self.context, instance_id) + test_notifier.NOTIFICATIONS = [] + + db.instance_update(self.context, instance_id, {'host': 'foo'}) + self.compute.prep_resize(context, inst_ref['uuid'], 1) + migration_ref = db.migration_get_by_instance_and_status(context, + inst_ref['uuid'], 'pre-migrating') + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEquals(msg['priority'], 'INFO') + self.assertEquals(msg['event_type'], 'compute.instance.resize.prep') + payload = msg['payload'] + self.assertEquals(payload['tenant_id'], self.project.id) + self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['instance_id'], instance_id) + self.assertEquals(payload['instance_type'], 'm1.tiny') + type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] + self.assertEquals(str(payload['instance_type_id']), str(type_id)) + self.assertTrue('display_name' in payload) + 
self.assertTrue('created_at' in payload) + self.assertTrue('launched_at' in payload) + self.assertEquals(payload['image_ref'], '1') + self.compute.terminate_instance(context, instance_id) + def test_resize_instance(self): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() context = self.context.elevated() + inst_ref = db.instance_get(context, instance_id) self.compute.run_instance(self.context, instance_id) - db.instance_update(self.context, instance_id, {'host': 'foo'}) - self.compute.prep_resize(context, instance_id, 1) + db.instance_update(self.context, inst_ref['uuid'], + {'host': 'foo'}) + self.compute.prep_resize(context, inst_ref['uuid'], 1) migration_ref = db.migration_get_by_instance_and_status(context, - instance_id, 'pre-migrating') - self.compute.resize_instance(context, instance_id, + inst_ref['uuid'], 'pre-migrating') + self.compute.resize_instance(context, inst_ref['uuid'], migration_ref['id']) self.compute.terminate_instance(context, instance_id) @@ -430,6 +523,57 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(context, instance_id) + def test_finish_revert_resize(self): + """Ensure that the flavor is reverted to the original on revert""" + context = self.context.elevated() + instance_id = self._create_instance() + + def fake(*args, **kwargs): + pass + + self.stubs.Set(self.compute.driver, 'finish_resize', fake) + self.stubs.Set(self.compute.driver, 'revert_resize', fake) + self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake) + + self.compute.run_instance(self.context, instance_id) + + # Confirm the instance size before the resize starts + inst_ref = db.instance_get(context, instance_id) + instance_type_ref = db.instance_type_get(context, + inst_ref['instance_type_id']) + self.assertEqual(instance_type_ref['flavorid'], 1) + + db.instance_update(self.context, instance_id, {'host': 'foo'}) + + self.compute.prep_resize(context, inst_ref['uuid'], 3) + + migration_ref = 
db.migration_get_by_instance_and_status(context, + inst_ref['uuid'], 'pre-migrating') + + self.compute.resize_instance(context, inst_ref['uuid'], + migration_ref['id']) + self.compute.finish_resize(context, inst_ref['uuid'], + int(migration_ref['id']), {}) + + # Prove that the instance size is now the new size + inst_ref = db.instance_get(context, instance_id) + instance_type_ref = db.instance_type_get(context, + inst_ref['instance_type_id']) + self.assertEqual(instance_type_ref['flavorid'], 3) + + # Finally, revert and confirm the old flavor has been applied + self.compute.revert_resize(context, inst_ref['uuid'], + migration_ref['id']) + self.compute.finish_revert_resize(context, inst_ref['uuid'], + migration_ref['id']) + + inst_ref = db.instance_get(context, instance_id) + instance_type_ref = db.instance_type_get(context, + inst_ref['instance_type_id']) + self.assertEqual(instance_type_ref['flavorid'], 1) + + self.compute.terminate_instance(context, instance_id) + def test_get_by_flavor_id(self): type = instance_types.get_instance_type_by_flavor_id(1) self.assertEqual(type['name'], 'm1.tiny') @@ -443,6 +587,14 @@ class ComputeTestCase(test.TestCase): self.context, instance_id, 1) self.compute.terminate_instance(self.context, instance_id) + def test_migrate(self): + context = self.context.elevated() + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + # Migrate simply calls resize() without a flavor_id. 
+ self.compute_api.resize(context, instance_id, None) + self.compute.terminate_instance(context, instance_id) + def _setup_other_managers(self): self.volume_manager = utils.import_object(FLAGS.volume_manager) self.network_manager = utils.import_object(FLAGS.network_manager) @@ -456,7 +608,7 @@ class ComputeTestCase(test.TestCase): dbmock = self.mox.CreateMock(db) dbmock.instance_get(c, i_id).AndReturn(instance_ref) - dbmock.instance_get_fixed_address(c, i_id).AndReturn(None) + dbmock.instance_get_fixed_addresses(c, i_id).AndReturn(None) self.compute.db = dbmock self.mox.ReplayAll() @@ -472,20 +624,18 @@ class ComputeTestCase(test.TestCase): self._setup_other_managers() dbmock = self.mox.CreateMock(db) volmock = self.mox.CreateMock(self.volume_manager) - netmock = self.mox.CreateMock(self.network_manager) drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') + dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') for i in range(len(i_ref['volumes'])): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') - netmock.setup_compute_network(c, i_ref['id']) + drivermock.plug_vifs(i_ref, []) drivermock.ensure_filtering_rules_for_instance(i_ref) self.compute.db = dbmock self.compute.volume_manager = volmock - self.compute.network_manager = netmock self.compute.driver = drivermock self.mox.ReplayAll() @@ -500,18 +650,16 @@ class ComputeTestCase(test.TestCase): self._setup_other_managers() dbmock = self.mox.CreateMock(db) - netmock = self.mox.CreateMock(self.network_manager) drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') + dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s 
has no volume."), i_ref['hostname']) - netmock.setup_compute_network(c, i_ref['id']) + drivermock.plug_vifs(i_ref, []) drivermock.ensure_filtering_rules_for_instance(i_ref) self.compute.db = dbmock - self.compute.network_manager = netmock self.compute.driver = drivermock self.mox.ReplayAll() @@ -532,18 +680,20 @@ class ComputeTestCase(test.TestCase): dbmock = self.mox.CreateMock(db) netmock = self.mox.CreateMock(self.network_manager) volmock = self.mox.CreateMock(self.volume_manager) + drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') + dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') for i in range(len(i_ref['volumes'])): volmock.setup_compute_volume(c, i_ref['volumes'][i]['id']) for i in range(FLAGS.live_migration_retry_count): - netmock.setup_compute_network(c, i_ref['id']).\ + drivermock.plug_vifs(i_ref, []).\ AndRaise(exception.ProcessExecutionError()) self.compute.db = dbmock self.compute.network_manager = netmock self.compute.volume_manager = volmock + self.compute.driver = drivermock self.mox.ReplayAll() self.assertRaises(exception.ProcessExecutionError, @@ -678,7 +828,7 @@ class ComputeTestCase(test.TestCase): for v in i_ref['volumes']: self.compute.volume_manager.remove_compute_volume(c, v['id']) self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') - self.compute.driver.unfilter_instance(i_ref) + self.compute.driver.unfilter_instance(i_ref, []) # executing self.mox.ReplayAll() @@ -721,3 +871,114 @@ class ComputeTestCase(test.TestCase): LOG.info(_("After force-killing instances: %s"), instances) self.assertEqual(len(instances), 1) self.assertEqual(power_state.SHUTOFF, instances[0]['state']) + + @staticmethod + def _parse_db_block_device_mapping(bdm_ref): + attr_list = ('delete_on_termination', 'device_name', 'no_device', + 'virtual_name', 'volume_id', 'volume_size', 'snapshot_id') + bdm = {} + 
for attr in attr_list: + val = bdm_ref.get(attr, None) + if val: + bdm[attr] = val + + return bdm + + def test_update_block_device_mapping(self): + instance_id = self._create_instance() + mappings = [ + {'virtual': 'ami', 'device': 'sda1'}, + {'virtual': 'root', 'device': '/dev/sda1'}, + + {'virtual': 'swap', 'device': 'sdb1'}, + {'virtual': 'swap', 'device': 'sdb2'}, + {'virtual': 'swap', 'device': 'sdb3'}, + {'virtual': 'swap', 'device': 'sdb4'}, + + {'virtual': 'ephemeral0', 'device': 'sdc1'}, + {'virtual': 'ephemeral1', 'device': 'sdc2'}, + {'virtual': 'ephemeral2', 'device': 'sdc3'}] + block_device_mapping = [ + # root + {'device_name': '/dev/sda1', + 'snapshot_id': 0x12345678, + 'delete_on_termination': False}, + + + # overwrite swap + {'device_name': '/dev/sdb2', + 'snapshot_id': 0x23456789, + 'delete_on_termination': False}, + {'device_name': '/dev/sdb3', + 'snapshot_id': 0x3456789A}, + {'device_name': '/dev/sdb4', + 'no_device': True}, + + # overwrite ephemeral + {'device_name': '/dev/sdc2', + 'snapshot_id': 0x456789AB, + 'delete_on_termination': False}, + {'device_name': '/dev/sdc3', + 'snapshot_id': 0x56789ABC}, + {'device_name': '/dev/sdc4', + 'no_device': True}, + + # volume + {'device_name': '/dev/sdd1', + 'snapshot_id': 0x87654321, + 'delete_on_termination': False}, + {'device_name': '/dev/sdd2', + 'snapshot_id': 0x98765432}, + {'device_name': '/dev/sdd3', + 'snapshot_id': 0xA9875463}, + {'device_name': '/dev/sdd4', + 'no_device': True}] + + self.compute_api._update_image_block_device_mapping( + self.context, instance_id, mappings) + + bdms = [self._parse_db_block_device_mapping(bdm_ref) + for bdm_ref in db.block_device_mapping_get_all_by_instance( + self.context, instance_id)] + expected_result = [ + {'virtual_name': 'swap', 'device_name': '/dev/sdb1'}, + {'virtual_name': 'swap', 'device_name': '/dev/sdb2'}, + {'virtual_name': 'swap', 'device_name': '/dev/sdb3'}, + {'virtual_name': 'swap', 'device_name': '/dev/sdb4'}, + {'virtual_name': 
'ephemeral0', 'device_name': '/dev/sdc1'}, + {'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'}, + {'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'}] + bdms.sort() + expected_result.sort() + self.assertDictListMatch(bdms, expected_result) + + self.compute_api._update_block_device_mapping( + self.context, instance_id, block_device_mapping) + bdms = [self._parse_db_block_device_mapping(bdm_ref) + for bdm_ref in db.block_device_mapping_get_all_by_instance( + self.context, instance_id)] + expected_result = [ + {'snapshot_id': 0x12345678, 'device_name': '/dev/sda1'}, + + {'virtual_name': 'swap', 'device_name': '/dev/sdb1'}, + {'snapshot_id': 0x23456789, 'device_name': '/dev/sdb2'}, + {'snapshot_id': 0x3456789A, 'device_name': '/dev/sdb3'}, + {'no_device': True, 'device_name': '/dev/sdb4'}, + + {'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'}, + {'snapshot_id': 0x456789AB, 'device_name': '/dev/sdc2'}, + {'snapshot_id': 0x56789ABC, 'device_name': '/dev/sdc3'}, + {'no_device': True, 'device_name': '/dev/sdc4'}, + + {'snapshot_id': 0x87654321, 'device_name': '/dev/sdd1'}, + {'snapshot_id': 0x98765432, 'device_name': '/dev/sdd2'}, + {'snapshot_id': 0xA9875463, 'device_name': '/dev/sdd3'}, + {'no_device': True, 'device_name': '/dev/sdd4'}] + bdms.sort() + expected_result.sort() + self.assertDictListMatch(bdms, expected_result) + + for bdm in db.block_device_mapping_get_all_by_instance( + self.context, instance_id): + db.block_device_mapping_destroy(self.context, bdm['id']) + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 831e7670f..1806cc1ea 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -61,7 +61,6 @@ class ConsoleTestCase(test.TestCase): inst['user_id'] = self.user.id inst['project_id'] = self.project.id inst['instance_type_id'] = 1 - inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 return 
db.instance_create(self.context, inst)['id'] diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 588a24b35..4ed0c2aa5 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -105,24 +105,25 @@ class DirectTestCase(test.TestCase): self.assertEqual(rv['data'], 'baz') -class DirectCloudTestCase(test_cloud.CloudTestCase): - def setUp(self): - super(DirectCloudTestCase, self).setUp() - compute_handle = compute.API(image_service=self.cloud.image_service) - volume_handle = volume.API() - network_handle = network.API() - direct.register_service('compute', compute_handle) - direct.register_service('volume', volume_handle) - direct.register_service('network', network_handle) - - self.router = direct.JsonParamsMiddleware(direct.Router()) - proxy = direct.Proxy(self.router) - self.cloud.compute_api = proxy.compute - self.cloud.volume_api = proxy.volume - self.cloud.network_api = proxy.network - compute_handle.volume_api = proxy.volume - compute_handle.network_api = proxy.network - - def tearDown(self): - super(DirectCloudTestCase, self).tearDown() - direct.ROUTES = {} +# NOTE(jkoelker): This fails using the EC2 api +#class DirectCloudTestCase(test_cloud.CloudTestCase): +# def setUp(self): +# super(DirectCloudTestCase, self).setUp() +# compute_handle = compute.API(image_service=self.cloud.image_service) +# volume_handle = volume.API() +# network_handle = network.API() +# direct.register_service('compute', compute_handle) +# direct.register_service('volume', volume_handle) +# direct.register_service('network', network_handle) +# +# self.router = direct.JsonParamsMiddleware(direct.Router()) +# proxy = direct.Proxy(self.router) +# self.cloud.compute_api = proxy.compute +# self.cloud.volume_api = proxy.volume +# self.cloud.network_api = proxy.network +# compute_handle.volume_api = proxy.volume +# compute_handle.network_api = proxy.network +# +# def tearDown(self): +# super(DirectCloudTestCase, self).tearDown() +# direct.ROUTES = {} diff --git 
a/nova/tests/test_exception.py b/nova/tests/test_exception.py index 4d3b9cc73..cd74f8871 100644 --- a/nova/tests/test_exception.py +++ b/nova/tests/test_exception.py @@ -32,3 +32,66 @@ class ApiErrorTestCase(test.TestCase): self.assertEqual(err.__str__(), 'blah code: fake error') self.assertEqual(err.code, 'blah code') self.assertEqual(err.msg, 'fake error') + + +class FakeNotifier(object): + """Acts like the nova.notifier.api module.""" + ERROR = 88 + + def __init__(self): + self.provided_publisher = None + self.provided_event = None + self.provided_priority = None + self.provided_payload = None + + def notify(self, publisher, event, priority, payload): + self.provided_publisher = publisher + self.provided_event = event + self.provided_priority = priority + self.provided_payload = payload + + +def good_function(): + return 99 + + +def bad_function_error(): + raise exception.Error() + + +def bad_function_exception(): + raise Exception() + + +class WrapExceptionTestCase(test.TestCase): + def test_wrap_exception_good_return(self): + wrapped = exception.wrap_exception() + self.assertEquals(99, wrapped(good_function)()) + + def test_wrap_exception_throws_error(self): + wrapped = exception.wrap_exception() + self.assertRaises(exception.Error, wrapped(bad_function_error)) + + def test_wrap_exception_throws_exception(self): + wrapped = exception.wrap_exception() + # Note that Exception is converted to Error ... 
+ self.assertRaises(exception.Error, wrapped(bad_function_exception)) + + def test_wrap_exception_with_notifier(self): + notifier = FakeNotifier() + wrapped = exception.wrap_exception(notifier, "publisher", "event", + "level") + self.assertRaises(exception.Error, wrapped(bad_function_exception)) + self.assertEquals(notifier.provided_publisher, "publisher") + self.assertEquals(notifier.provided_event, "event") + self.assertEquals(notifier.provided_priority, "level") + for key in ['exception', 'args']: + self.assertTrue(key in notifier.provided_payload.keys()) + + def test_wrap_exception_with_notifier_defaults(self): + notifier = FakeNotifier() + wrapped = exception.wrap_exception(notifier) + self.assertRaises(exception.Error, wrapped(bad_function_exception)) + self.assertEquals(notifier.provided_publisher, None) + self.assertEquals(notifier.provided_event, "bad_function_exception") + self.assertEquals(notifier.provided_priority, notifier.ERROR) diff --git a/nova/tests/test_flat_network.py b/nova/tests/test_flat_network.py deleted file mode 100644 index 8544019c0..000000000 --- a/nova/tests/test_flat_network.py +++ /dev/null @@ -1,161 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Unit Tests for flat network code -""" -import netaddr -import os -import unittest - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import test -from nova import utils -from nova.auth import manager -from nova.tests.network import base - - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -class FlatNetworkTestCase(base.NetworkTestCase): - """Test cases for network code""" - def test_public_network_association(self): - """Makes sure that we can allocate a public ip""" - # TODO(vish): better way of adding floating ips - - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - pubnet = netaddr.IPRange(flags.FLAGS.floating_range) - address = str(list(pubnet)[0]) - try: - db.floating_ip_get_by_address(context.get_admin_context(), address) - except exception.NotFound: - db.floating_ip_create(context.get_admin_context(), - {'address': address, - 'host': FLAGS.host}) - - self.assertRaises(NotImplementedError, - self.network.allocate_floating_ip, - self.context, self.projects[0].id) - - fix_addr = self._create_address(0) - float_addr = address - self.assertRaises(NotImplementedError, - self.network.associate_floating_ip, - self.context, float_addr, fix_addr) - - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - - self.assertRaises(NotImplementedError, - self.network.disassociate_floating_ip, - self.context, float_addr) - - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - - self.assertRaises(NotImplementedError, - self.network.deallocate_floating_ip, - self.context, float_addr) - - self.network.deallocate_fixed_ip(self.context, fix_addr) - db.floating_ip_destroy(context.get_admin_context(), float_addr) - - def test_allocate_deallocate_fixed_ip(self): - """Makes 
sure that we can allocate and deallocate a fixed ip""" - address = self._create_address(0) - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - self._deallocate_address(0, address) - - # check if the fixed ip address is really deallocated - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - def test_side_effects(self): - """Ensures allocating and releasing has no side effects""" - address = self._create_address(0) - address2 = self._create_address(1, self.instance2_id) - - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[1].id)) - - self._deallocate_address(0, address) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - # First address release shouldn't affect the second - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[0].id)) - - self._deallocate_address(1, address2) - self.assertFalse(self._is_allocated_in_project(address2, - self.projects[1].id)) - - def test_ips_are_reused(self): - """Makes sure that ip addresses that are deallocated get reused""" - address = self._create_address(0) - self.network.deallocate_fixed_ip(self.context, address) - - address2 = self._create_address(0) - self.assertEqual(address, address2) - - self.network.deallocate_fixed_ip(self.context, address2) - - def test_too_many_addresses(self): - """Test for a NoMoreAddresses exception when all fixed ips are used. 
- """ - admin_context = context.get_admin_context() - network = db.project_get_network(admin_context, self.projects[0].id) - num_available_ips = db.network_count_available_ips(admin_context, - network['id']) - addresses = [] - instance_ids = [] - for i in range(num_available_ips): - instance_ref = self._create_instance(0) - instance_ids.append(instance_ref['id']) - address = self._create_address(0, instance_ref['id']) - addresses.append(address) - - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, 0) - self.assertRaises(db.NoMoreAddresses, - self.network.allocate_fixed_ip, - self.context, - 'foo') - - for i in range(num_available_ips): - self.network.deallocate_fixed_ip(self.context, addresses[i]) - db.instance_destroy(context.get_admin_context(), instance_ids[i]) - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, num_available_ips) - - def run(self, result=None): - if(FLAGS.network_manager == 'nova.network.manager.FlatManager'): - super(FlatNetworkTestCase, self).run(result) diff --git a/nova/tests/test_hosts.py b/nova/tests/test_hosts.py new file mode 100644 index 000000000..548f81f8b --- /dev/null +++ b/nova/tests/test_hosts.py @@ -0,0 +1,102 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import stubout +import webob.exc + +from nova import context +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova.api.openstack.contrib import hosts as os_hosts +from nova.scheduler import api as scheduler_api + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.hosts') +# Simulate the hosts returned by the zone manager. +HOST_LIST = [ + {"host_name": "host_c1", "service": "compute"}, + {"host_name": "host_c2", "service": "compute"}, + {"host_name": "host_v1", "service": "volume"}, + {"host_name": "host_v2", "service": "volume"}] + + +def stub_get_host_list(req): + return HOST_LIST + + +def stub_set_host_enabled(context, host, enabled): + # We'll simulate success and failure by assuming + # that 'host_c1' always succeeds, and 'host_c2' + # always fails + fail = (host == "host_c2") + status = "enabled" if (enabled ^ fail) else "disabled" + return status + + +class FakeRequest(object): + environ = {"nova.context": context.get_admin_context()} + + +class HostTestCase(test.TestCase): + """Test Case for hosts.""" + + def setUp(self): + super(HostTestCase, self).setUp() + self.controller = os_hosts.HostController() + self.req = FakeRequest() + self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list) + self.stubs.Set(self.controller.compute_api, 'set_host_enabled', + stub_set_host_enabled) + + def test_list_hosts(self): + """Verify that the compute hosts are returned.""" + hosts = os_hosts._list_hosts(self.req) + self.assertEqual(hosts, HOST_LIST) + + compute_hosts = os_hosts._list_hosts(self.req, "compute") + expected = [host for host in HOST_LIST + if host["service"] == "compute"] + self.assertEqual(compute_hosts, expected) + + def test_disable_host(self): + dis_body = {"status": "disable"} + result_c1 = self.controller.update(self.req, "host_c1", body=dis_body) + self.assertEqual(result_c1["status"], "disabled") + result_c2 = self.controller.update(self.req, "host_c2", 
body=dis_body) + self.assertEqual(result_c2["status"], "enabled") + + def test_enable_host(self): + en_body = {"status": "enable"} + result_c1 = self.controller.update(self.req, "host_c1", body=en_body) + self.assertEqual(result_c1["status"], "enabled") + result_c2 = self.controller.update(self.req, "host_c2", body=en_body) + self.assertEqual(result_c2["status"], "disabled") + + def test_bad_status_value(self): + bad_body = {"status": "bad"} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, "host_c1", body=bad_body) + + def test_bad_update_key(self): + bad_body = {"crazy": "bad"} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, "host_c1", body=bad_body) + + def test_bad_host(self): + self.assertRaises(exception.HostNotFound, self.controller.update, + self.req, "bogus_host_name", body={"status": "disable"}) diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py index c26cf82ff..393ed1e36 100644 --- a/nova/tests/test_instance_types_extra_specs.py +++ b/nova/tests/test_instance_types_extra_specs.py @@ -105,8 +105,8 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): self.instance_type_id) self.assertEquals(expected_specs, actual_specs) - def test_instance_type_get_by_id_with_extra_specs(self): - instance_type = db.api.instance_type_get_by_id( + def test_instance_type_get_with_extra_specs(self): + instance_type = db.api.instance_type_get( context.get_admin_context(), self.instance_type_id) self.assertEquals(instance_type['extra_specs'], @@ -115,7 +115,7 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): xpu_arch="fermi", xpus="2", xpu_model="Tesla 2050")) - instance_type = db.api.instance_type_get_by_id( + instance_type = db.api.instance_type_get( context.get_admin_context(), 5) self.assertEquals(instance_type['extra_specs'], {}) @@ -136,7 +136,7 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): "m1.small") 
self.assertEquals(instance_type['extra_specs'], {}) - def test_instance_type_get_by_id_with_extra_specs(self): + def test_instance_type_get_with_extra_specs(self): instance_type = db.api.instance_type_get_by_flavor_id( context.get_admin_context(), 105) diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py new file mode 100644 index 000000000..918034269 --- /dev/null +++ b/nova/tests/test_iptables_network.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Unit Tests for network code.""" + +import os + +from nova import test +from nova.network import linux_net + + +class IptablesManagerTestCase(test.TestCase): + sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011', + '*filter', + ':INPUT ACCEPT [2223527:305688874]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [2172501:140856656]', + ':nova-compute-FORWARD - [0:0]', + ':nova-compute-INPUT - [0:0]', + ':nova-compute-local - [0:0]', + ':nova-compute-OUTPUT - [0:0]', + ':nova-filter-top - [0:0]', + '-A FORWARD -j nova-filter-top ', + '-A OUTPUT -j nova-filter-top ', + '-A nova-filter-top -j nova-compute-local ', + '-A INPUT -j nova-compute-INPUT ', + '-A OUTPUT -j nova-compute-OUTPUT ', + '-A FORWARD -j nova-compute-FORWARD ', + '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with ' + 'icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with ' + 'icmp-port-unreachable ', + 'COMMIT', + '# Completed on Fri Feb 18 15:17:05 2011'] + + sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011', + '*nat', + ':PREROUTING ACCEPT [3936:762355]', + ':INPUT ACCEPT [2447:225266]', + ':OUTPUT ACCEPT [63491:4191863]', + ':POSTROUTING ACCEPT [63112:4108641]', + ':nova-compute-OUTPUT - [0:0]', + ':nova-compute-floating-ip-snat - [0:0]', + ':nova-compute-SNATTING - [0:0]', + ':nova-compute-PREROUTING - [0:0]', + ':nova-compute-POSTROUTING - [0:0]', + ':nova-postrouting-bottom - [0:0]', + '-A PREROUTING -j nova-compute-PREROUTING ', + '-A OUTPUT -j nova-compute-OUTPUT ', + '-A POSTROUTING -j nova-compute-POSTROUTING ', + '-A POSTROUTING -j nova-postrouting-bottom ', + '-A nova-postrouting-bottom -j 
nova-compute-SNATTING ', + '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ', + 'COMMIT', + '# Completed on Fri Feb 18 15:17:05 2011'] + + def setUp(self): + super(IptablesManagerTestCase, self).setUp() + self.manager = linux_net.IptablesManager() + + def test_filter_rules_are_wrapped(self): + current_lines = self.sample_filter + + table = self.manager.ipv4['filter'] + table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') + new_lines = self.manager._modify_rules(current_lines, table) + self.assertTrue('-A run_tests.py-FORWARD ' + '-s 1.2.3.4/5 -j DROP' in new_lines) + + table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') + new_lines = self.manager._modify_rules(current_lines, table) + self.assertTrue('-A run_tests.py-FORWARD ' + '-s 1.2.3.4/5 -j DROP' not in new_lines) + + def test_nat_rules(self): + current_lines = self.sample_nat + new_lines = self.manager._modify_rules(current_lines, + self.manager.ipv4['nat']) + + for line in [':nova-compute-OUTPUT - [0:0]', + ':nova-compute-floating-ip-snat - [0:0]', + ':nova-compute-SNATTING - [0:0]', + ':nova-compute-PREROUTING - [0:0]', + ':nova-compute-POSTROUTING - [0:0]']: + self.assertTrue(line in new_lines, "One of nova-compute's chains " + "went missing.") + + seen_lines = set() + for line in new_lines: + line = line.strip() + self.assertTrue(line not in seen_lines, + "Duplicate line: %s" % line) + seen_lines.add(line) + + last_postrouting_line = '' + + for line in new_lines: + if line.startswith('-A POSTROUTING'): + last_postrouting_line = line + + self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line, + "Last POSTROUTING rule does not jump to " + "nova-postouting-bottom: %s" % last_postrouting_line) + + for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']: + self.assertTrue('-A %s -j run_tests.py-%s' \ + % (chain, chain) in new_lines, + "Built-in chain %s not wrapped" % (chain,)) + + def test_filter_rules(self): + current_lines = self.sample_filter + new_lines = 
self.manager._modify_rules(current_lines, + self.manager.ipv4['filter']) + + for line in [':nova-compute-FORWARD - [0:0]', + ':nova-compute-INPUT - [0:0]', + ':nova-compute-local - [0:0]', + ':nova-compute-OUTPUT - [0:0]']: + self.assertTrue(line in new_lines, "One of nova-compute's chains" + " went missing.") + + seen_lines = set() + for line in new_lines: + line = line.strip() + self.assertTrue(line not in seen_lines, + "Duplicate line: %s" % line) + seen_lines.add(line) + + for chain in ['FORWARD', 'OUTPUT']: + for line in new_lines: + if line.startswith('-A %s' % chain): + self.assertTrue('-j nova-filter-top' in line, + "First %s rule does not " + "jump to nova-filter-top" % chain) + break + + self.assertTrue('-A nova-filter-top ' + '-j run_tests.py-local' in new_lines, + "nova-filter-top does not jump to wrapped local chain") + + for chain in ['INPUT', 'OUTPUT', 'FORWARD']: + self.assertTrue('-A %s -j run_tests.py-%s' \ + % (chain, chain) in new_lines, + "Built-in chain %s not wrapped" % (chain,)) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index d12e21063..ad0931a89 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -54,12 +54,17 @@ def _create_network_info(count=1, ipv6=None): fake_ip = '0.0.0.0/0' fake_ip_2 = '0.0.0.1/0' fake_ip_3 = '0.0.0.1/0' - network = {'gateway': fake, - 'gateway_v6': fake, - 'bridge': fake, + fake_vlan = 100 + fake_bridge_interface = 'eth0' + network = {'bridge': fake, 'cidr': fake_ip, - 'cidr_v6': fake_ip} + 'cidr_v6': fake_ip, + 'vlan': fake_vlan, + 'bridge_interface': fake_bridge_interface} mapping = {'mac': fake, + 'dhcp_server': fake, + 'gateway': fake, + 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} if ipv6: mapping['ip6s'] = [{'ip': fake_ip}, @@ -68,6 +73,24 @@ def _create_network_info(count=1, ipv6=None): return [(network, mapping) for x in xrange(0, count)] +def _setup_networking(instance_id, ip='1.2.3.4'): + ctxt = context.get_admin_context() + network_ref = 
db.project_get_networks(ctxt, + 'fake', + associate=True)[0] + vif = {'address': '56:12:12:12:12:12', + 'network_id': network_ref['id'], + 'instance_id': instance_id} + vif_ref = db.virtual_interface_create(ctxt, vif) + + fixed_ip = {'address': ip, + 'network_id': network_ref['id'], + 'virtual_interface_id': vif_ref['id']} + db.fixed_ip_create(ctxt, fixed_ip) + db.fixed_ip_update(ctxt, ip, {'allocated': True, + 'instance_id': instance_id}) + + class CacheConcurrencyTestCase(test.TestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() @@ -155,11 +178,15 @@ class LibvirtConnTestCase(test.TestCase): FLAGS.instances_path = '' self.call_libvirt_dependant_setup = False + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(LibvirtConnTestCase, self).tearDown() + test_ip = '10.11.12.13' test_instance = {'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', @@ -196,9 +223,19 @@ class LibvirtConnTestCase(test.TestCase): def setattr(self, key, val): self.__setattr__(key, val) + # A fake VIF driver + class FakeVIFDriver(object): + + def __init__(self, **kwargs): + pass + + def setattr(self, key, val): + self.__setattr__(key, val) + # Creating mocks fake = FakeLibvirtConnection() fakeip = FakeIptablesFirewallDriver + fakevif = FakeVIFDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) @@ -206,6 +243,8 @@ class LibvirtConnTestCase(test.TestCase): # Inevitable mocks for connection.LibvirtConnection self.mox.StubOutWithMock(connection.utils, 'import_class') connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) + self.mox.StubOutWithMock(connection.utils, 'import_object') + connection.utils.import_object(mox.IgnoreArg()).AndReturn(fakevif) self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') 
connection.LibvirtConnection._conn = fake @@ -241,6 +280,7 @@ class LibvirtConnTestCase(test.TestCase): return db.service_create(context.get_admin_context(), service_ref) + @test.skip_test("Please review this test to ensure intent") def test_preparing_xml_info(self): conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) @@ -256,39 +296,27 @@ class LibvirtConnTestCase(test.TestCase): _create_network_info(2)) self.assertTrue(len(result['nics']) == 2) - def test_get_nic_for_xml_v4(self): - conn = connection.LibvirtConnection(True) - network, mapping = _create_network_info()[0] - self.flags(use_ipv6=False) - params = conn._get_nic_for_xml(network, mapping)['extra_params'] - self.assertTrue(params.find('PROJNETV6') == -1) - self.assertTrue(params.find('PROJMASKV6') == -1) - - def test_get_nic_for_xml_v6(self): - conn = connection.LibvirtConnection(True) - network, mapping = _create_network_info()[0] - self.flags(use_ipv6=True) - params = conn._get_nic_for_xml(network, mapping)['extra_params'] - self.assertTrue(params.find('PROJNETV6') > -1) - self.assertTrue(params.find('PROJMASKV6') > -1) - + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_no_ramdisk(self): instance_data = dict(self.test_instance) instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=False) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) + 
@test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' @@ -296,6 +324,7 @@ class LibvirtConnTestCase(test.TestCase): self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_rescue(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' @@ -303,6 +332,7 @@ class LibvirtConnTestCase(test.TestCase): self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True, rescue=True) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_lxc_container_and_uri(self): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) @@ -402,12 +432,18 @@ class LibvirtConnTestCase(test.TestCase): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) - host = self.network.get_network_host(user_context.elevated()) - network_ref = db.project_get_network(context.get_admin_context(), - self.project.id) - + # Re-get the instance so it's bound to an actual session + instance_ref = db.instance_get(user_context, instance_ref['id']) + network_ref = db.project_get_networks(context.get_admin_context(), + self.project.id)[0] + + vif = {'address': '56:12:12:12:12:12', + 'network_id': network_ref['id'], + 'instance_id': instance_ref['id']} + vif_ref = db.virtual_interface_create(self.context, vif) fixed_ip = {'address': self.test_ip, - 'network_id': network_ref['id']} + 'network_id': network_ref['id'], + 'virtual_interface_id': vif_ref['id']} ctxt = context.get_admin_context() fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) @@ -442,18 +478,10 @@ class LibvirtConnTestCase(test.TestCase): user_context = context.RequestContext(project=self.project, user=self.user) 
instance_ref = db.instance_create(user_context, instance) - host = self.network.get_network_host(user_context.elevated()) - network_ref = db.project_get_network(context.get_admin_context(), - self.project.id) - - fixed_ip = {'address': self.test_ip, - 'network_id': network_ref['id']} + network_ref = db.project_get_networks(context.get_admin_context(), + self.project.id)[0] - ctxt = context.get_admin_context() - fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, self.test_ip, - {'allocated': True, - 'instance_id': instance_ref['id']}) + _setup_networking(instance_ref['id'], ip=self.test_ip) type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), @@ -694,6 +722,9 @@ class LibvirtConnTestCase(test.TestCase): return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) + self.mox.StubOutWithMock(self.compute, "recover_live_migration") + self.compute.recover_live_migration(self.context, instance_ref, + dest='dest') # Start test self.mox.ReplayAll() @@ -712,6 +743,7 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id']) + @test.skip_test("test needs rewrite: instance no longer has mac_address") def test_spawn_with_network_info(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): @@ -730,8 +762,8 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) - network = db.project_get_network(context.get_admin_context(), - self.project.id) + network = db.project_get_networks(context.get_admin_context(), + self.project.id)[0] ip_dict = {'ip': self.test_ip, 'netmask': network['netmask'], 'enabled': '1'} @@ -756,11 +788,6 @@ class LibvirtConnTestCase(test.TestCase): ip = conn.get_host_ip_addr() self.assertEquals(ip, FLAGS.my_ip) - def tearDown(self): - 
self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(LibvirtConnTestCase, self).tearDown() - class NWFilterFakes: def __init__(self): @@ -866,19 +893,24 @@ class IptablesFirewallTestCase(test.TestCase): return db.instance_create(self.context, {'user_id': 'fake', 'project_id': 'fake', - 'mac_address': '56:12:12:12:12:12', 'instance_type_id': 1}) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_static_filters(self): instance_ref = self._create_instance_ref() ip = '10.11.12.13' - network_ref = db.project_get_network(self.context, - 'fake') + network_ref = db.project_get_networks(self.context, + 'fake', + associate=True)[0] + vif = {'address': '56:12:12:12:12:12', + 'network_id': network_ref['id'], + 'instance_id': instance_ref['id']} + vif_ref = db.virtual_interface_create(self.context, vif) fixed_ip = {'address': ip, - 'network_id': network_ref['id']} - + 'network_id': network_ref['id'], + 'virtual_interface_id': vif_ref['id']} admin_ctxt = context.get_admin_context() db.fixed_ip_create(admin_ctxt, fixed_ip) db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, @@ -1015,6 +1047,7 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(ipv6_network_rules, ipv6_rules_per_network * networks_count) + @test.skip_test("skipping libvirt tests") def test_do_refresh_security_group_rules(self): instance_ref = self._create_instance_ref() self.mox.StubOutWithMock(self.fw, @@ -1025,6 +1058,7 @@ class IptablesFirewallTestCase(test.TestCase): self.mox.ReplayAll() self.fw.do_refresh_security_group_rules("fake") + @test.skip_test("skip libvirt test project_get_network no longer exists") def test_unfilter_instance_undefines_nwfilter(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): @@ -1058,6 +1092,7 @@ class IptablesFirewallTestCase(test.TestCase): db.instance_destroy(admin_ctxt, instance_ref['id']) + @test.skip_test("skip libvirt test project_get_network no longer 
exists") def test_provider_firewall_rules(self): # setup basic instance data instance_ref = self._create_instance_ref() @@ -1207,7 +1242,6 @@ class NWFilterTestCase(test.TestCase): return db.instance_create(self.context, {'user_id': 'fake', 'project_id': 'fake', - 'mac_address': '00:A0:C9:14:C8:29', 'instance_type_id': 1}) def _create_instance_type(self, params={}): @@ -1225,6 +1259,7 @@ class NWFilterTestCase(test.TestCase): inst.update(params) return db.instance_type_create(context, inst)['id'] + @test.skip_test('Skipping this test') def test_creates_base_rule_first(self): # These come pre-defined by libvirt self.defined_filters = ['no-mac-spoofing', @@ -1258,13 +1293,15 @@ class NWFilterTestCase(test.TestCase): ip = '10.11.12.13' - network_ref = db.project_get_network(self.context, 'fake') - fixed_ip = {'address': ip, 'network_id': network_ref['id']} + #network_ref = db.project_get_networks(self.context, 'fake')[0] + #fixed_ip = {'address': ip, 'network_id': network_ref['id']} - admin_ctxt = context.get_admin_context() - db.fixed_ip_create(admin_ctxt, fixed_ip) - db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, - 'instance_id': inst_id}) + #admin_ctxt = context.get_admin_context() + #db.fixed_ip_create(admin_ctxt, fixed_ip) + #db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + # 'instance_id': inst_id}) + + self._setup_networking(instance_ref['id'], ip=ip) def _ensure_all_called(): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], @@ -1299,6 +1336,7 @@ class NWFilterTestCase(test.TestCase): "fake") self.assertEquals(len(result), 3) + @test.skip_test("skip libvirt test project_get_network no longer exists") def test_unfilter_instance_undefines_nwfilters(self): admin_ctxt = context.get_admin_context() diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py new file mode 100644 index 000000000..c862726ab --- /dev/null +++ b/nova/tests/test_metadata.py @@ -0,0 +1,76 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# 
Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for the testing the metadata code.""" + +import base64 +import httplib + +import webob + +from nova import test +from nova import wsgi +from nova.api.ec2 import metadatarequesthandler +from nova.db.sqlalchemy import api + + +class MetadataTestCase(test.TestCase): + """Test that metadata is returning proper values.""" + + def setUp(self): + super(MetadataTestCase, self).setUp() + self.instance = ({'id': 1, + 'project_id': 'test', + 'key_name': None, + 'host': 'test', + 'launch_index': 1, + 'instance_type': 'm1.tiny', + 'reservation_id': 'r-xxxxxxxx', + 'user_data': '', + 'image_ref': 7, + 'hostname': 'test'}) + + def instance_get(*args, **kwargs): + return self.instance + + def floating_get(*args, **kwargs): + return '99.99.99.99' + + self.stubs.Set(api, 'instance_get', instance_get) + self.stubs.Set(api, 'fixed_ip_get_instance', instance_get) + self.stubs.Set(api, 'instance_get_floating_address', floating_get) + self.app = metadatarequesthandler.MetadataRequestHandler() + + def request(self, relative_url): + request = webob.Request.blank(relative_url) + request.remote_addr = "127.0.0.1" + return request.get_response(self.app).body + + def test_base(self): + self.assertEqual(self.request('/'), 'meta-data/\nuser-data') + + def test_user_data(self): + 
self.instance['user_data'] = base64.b64encode('happy') + self.assertEqual(self.request('/user-data'), 'happy') + + def test_security_groups(self): + def sg_get(*args, **kwargs): + return [{'name': 'default'}, {'name': 'other'}] + self.stubs.Set(api, 'security_group_get_by_instance', sg_get) + self.assertEqual(self.request('/meta-data/security-groups'), + 'default\nother') diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 9327c7129..28f50d328 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -1,196 +1,269 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Rackspace # All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for network code -""" -import netaddr -import os +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from nova import db +from nova import exception +from nova import flags +from nova import log as logging from nova import test -from nova.network import linux_net - - -class IptablesManagerTestCase(test.TestCase): - sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011', - '*filter', - ':INPUT ACCEPT [2223527:305688874]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [2172501:140856656]', - ':nova-compute-FORWARD - [0:0]', - ':nova-compute-INPUT - [0:0]', - ':nova-compute-local - [0:0]', - ':nova-compute-OUTPUT - [0:0]', - ':nova-filter-top - [0:0]', - '-A FORWARD -j nova-filter-top ', - '-A OUTPUT -j nova-filter-top ', - '-A nova-filter-top -j nova-compute-local ', - '-A INPUT -j nova-compute-INPUT ', - '-A OUTPUT -j nova-compute-OUTPUT ', - '-A FORWARD -j nova-compute-FORWARD ', - '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', - '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', - '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', - '-A FORWARD -o virbr0 -j REJECT --reject-with ' - 'icmp-port-unreachable ', - '-A FORWARD -i virbr0 -j REJECT --reject-with ' - 'icmp-port-unreachable ', - 'COMMIT', - '# Completed on Fri Feb 18 15:17:05 2011'] - - sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011', - '*nat', - ':PREROUTING ACCEPT [3936:762355]', - ':INPUT ACCEPT [2447:225266]', - ':OUTPUT ACCEPT [63491:4191863]', - ':POSTROUTING ACCEPT [63112:4108641]', - ':nova-compute-OUTPUT - [0:0]', - ':nova-compute-floating-ip-snat - [0:0]', - ':nova-compute-SNATTING - [0:0]', - ':nova-compute-PREROUTING - [0:0]', - ':nova-compute-POSTROUTING - [0:0]', - ':nova-postrouting-bottom - [0:0]', - '-A PREROUTING -j nova-compute-PREROUTING ', - '-A OUTPUT -j nova-compute-OUTPUT ', - '-A POSTROUTING -j nova-compute-POSTROUTING ', - '-A POSTROUTING -j 
nova-postrouting-bottom ', - '-A nova-postrouting-bottom -j nova-compute-SNATTING ', - '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ', - 'COMMIT', - '# Completed on Fri Feb 18 15:17:05 2011'] +from nova.network import manager as network_manager + +import mox + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.network') + + +HOST = "testhost" + + +class FakeModel(dict): + """Represent a model from the db""" + def __init__(self, *args, **kwargs): + self.update(kwargs) + + def __getattr__(self, name): + return self[name] + + +networks = [{'id': 0, + 'label': 'test0', + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.0.0/24', + 'cidr_v6': '2001:db8::/64', + 'gateway_v6': '2001:db8::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa0', + 'bridge_interface': 'fake_fa0', + 'gateway': '192.168.0.1', + 'broadcast': '192.168.0.255', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.0.2'}, + {'id': 1, + 'label': 'test1', + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.1.0/24', + 'cidr_v6': '2001:db9::/64', + 'gateway_v6': '2001:db9::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa1', + 'bridge_interface': 'fake_fa1', + 'gateway': '192.168.1.1', + 'broadcast': '192.168.1.255', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.1.2'}] + + +fixed_ips = [{'id': 0, + 'network_id': 0, + 'address': '192.168.0.100', + 'instance_id': 0, + 'allocated': False, + 'virtual_interface_id': 0, + 'floating_ips': []}, + {'id': 0, + 'network_id': 1, + 'address': '192.168.1.100', + 'instance_id': 0, + 'allocated': False, + 'virtual_interface_id': 0, + 'floating_ips': []}] + + +flavor = {'id': 0, + 'rxtx_cap': 3} + + +floating_ip_fields = {'id': 0, + 'address': '192.168.10.100', + 'fixed_ip_id': 0, + 
'project_id': None, + 'auto_assigned': False} + +vifs = [{'id': 0, + 'address': 'DE:AD:BE:EF:00:00', + 'network_id': 0, + 'network': FakeModel(**networks[0]), + 'instance_id': 0}, + {'id': 1, + 'address': 'DE:AD:BE:EF:00:01', + 'network_id': 1, + 'network': FakeModel(**networks[1]), + 'instance_id': 0}] + + +class FlatNetworkTestCase(test.TestCase): + def setUp(self): + super(FlatNetworkTestCase, self).setUp() + self.network = network_manager.FlatManager(host=HOST) + self.network.db = db + + def test_get_instance_nw_info(self): + self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance') + self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') + self.mox.StubOutWithMock(db, 'instance_type_get') + + db.fixed_ip_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(fixed_ips) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(vifs) + db.instance_type_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(flavor) + self.mox.ReplayAll() + + nw_info = self.network.get_instance_nw_info(None, 0, 0, None) + + self.assertTrue(nw_info) + + for i, nw in enumerate(nw_info): + i8 = i + 8 + check = {'bridge': 'fa%s' % i, + 'cidr': '192.168.%s.0/24' % i, + 'cidr_v6': '2001:db%s::/64' % i8, + 'id': i, + 'multi_host': False, + 'injected': 'DONTCARE', + 'bridge_interface': 'fake_fa%s' % i, + 'vlan': None} + + self.assertDictMatch(nw[0], check) + + check = {'broadcast': '192.168.%s.255' % i, + 'dhcp_server': '192.168.%s.1' % i, + 'dns': 'DONTCARE', + 'gateway': '192.168.%s.1' % i, + 'gateway6': '2001:db%s::1' % i8, + 'ip6s': 'DONTCARE', + 'ips': 'DONTCARE', + 'label': 'test%s' % i, + 'mac': 'DE:AD:BE:EF:00:0%s' % i, + 'rxtx_cap': 'DONTCARE', + 'should_create_vlan': False, + 'should_create_bridge': False} + self.assertDictMatch(nw[1], check) + + check = [{'enabled': 'DONTCARE', + 'ip': '2001:db%s::dcad:beff:feef:%s' % (i8, i), + 'netmask': '64'}] + self.assertDictListMatch(nw[1]['ip6s'], check) + + check = [{'enabled': '1', + 'ip': 
'192.168.%s.100' % i, + 'netmask': '255.255.255.0'}] + self.assertDictListMatch(nw[1]['ips'], check) + + +class VlanNetworkTestCase(test.TestCase): def setUp(self): - super(IptablesManagerTestCase, self).setUp() - self.manager = linux_net.IptablesManager() - - def test_filter_rules_are_wrapped(self): - current_lines = self.sample_filter - - table = self.manager.ipv4['filter'] - table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') - new_lines = self.manager._modify_rules(current_lines, table) - self.assertTrue('-A run_tests.py-FORWARD ' - '-s 1.2.3.4/5 -j DROP' in new_lines) - - table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') - new_lines = self.manager._modify_rules(current_lines, table) - self.assertTrue('-A run_tests.py-FORWARD ' - '-s 1.2.3.4/5 -j DROP' not in new_lines) - - def test_nat_rules(self): - current_lines = self.sample_nat - new_lines = self.manager._modify_rules(current_lines, - self.manager.ipv4['nat']) - - for line in [':nova-compute-OUTPUT - [0:0]', - ':nova-compute-floating-ip-snat - [0:0]', - ':nova-compute-SNATTING - [0:0]', - ':nova-compute-PREROUTING - [0:0]', - ':nova-compute-POSTROUTING - [0:0]']: - self.assertTrue(line in new_lines, "One of nova-compute's chains " - "went missing.") - - seen_lines = set() - for line in new_lines: - line = line.strip() - self.assertTrue(line not in seen_lines, - "Duplicate line: %s" % line) - seen_lines.add(line) - - last_postrouting_line = '' - - for line in new_lines: - if line.startswith('-A POSTROUTING'): - last_postrouting_line = line - - self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line, - "Last POSTROUTING rule does not jump to " - "nova-postouting-bottom: %s" % last_postrouting_line) - - for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']: - self.assertTrue('-A %s -j run_tests.py-%s' \ - % (chain, chain) in new_lines, - "Built-in chain %s not wrapped" % (chain,)) - - def test_filter_rules(self): - current_lines = self.sample_filter - new_lines = 
self.manager._modify_rules(current_lines, - self.manager.ipv4['filter']) - - for line in [':nova-compute-FORWARD - [0:0]', - ':nova-compute-INPUT - [0:0]', - ':nova-compute-local - [0:0]', - ':nova-compute-OUTPUT - [0:0]']: - self.assertTrue(line in new_lines, "One of nova-compute's chains" - " went missing.") - - seen_lines = set() - for line in new_lines: - line = line.strip() - self.assertTrue(line not in seen_lines, - "Duplicate line: %s" % line) - seen_lines.add(line) - - for chain in ['FORWARD', 'OUTPUT']: - for line in new_lines: - if line.startswith('-A %s' % chain): - self.assertTrue('-j nova-filter-top' in line, - "First %s rule does not " - "jump to nova-filter-top" % chain) - break - - self.assertTrue('-A nova-filter-top ' - '-j run_tests.py-local' in new_lines, - "nova-filter-top does not jump to wrapped local chain") - - for chain in ['INPUT', 'OUTPUT', 'FORWARD']: - self.assertTrue('-A %s -j run_tests.py-%s' \ - % (chain, chain) in new_lines, - "Built-in chain %s not wrapped" % (chain,)) - - def test_will_empty_chain(self): - self.manager.ipv4['filter'].add_chain('test-chain') - self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP') - old_count = len(self.manager.ipv4['filter'].rules) - self.manager.ipv4['filter'].empty_chain('test-chain') - self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules)) - - def test_will_empty_unwrapped_chain(self): - self.manager.ipv4['filter'].add_chain('test-chain', wrap=False) - self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP', - wrap=False) - old_count = len(self.manager.ipv4['filter'].rules) - self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False) - self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules)) - - def test_will_not_empty_wrapped_when_unwrapped(self): - self.manager.ipv4['filter'].add_chain('test-chain') - self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP') - old_count = len(self.manager.ipv4['filter'].rules) - 
self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False) - self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules)) - - def test_will_not_empty_unwrapped_when_wrapped(self): - self.manager.ipv4['filter'].add_chain('test-chain', wrap=False) - self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP', - wrap=False) - old_count = len(self.manager.ipv4['filter'].rules) - self.manager.ipv4['filter'].empty_chain('test-chain') - self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules)) + super(VlanNetworkTestCase, self).setUp() + self.network = network_manager.VlanManager(host=HOST) + self.network.db = db + + def test_vpn_allocate_fixed_ip(self): + self.mox.StubOutWithMock(db, 'fixed_ip_associate') + self.mox.StubOutWithMock(db, 'fixed_ip_update') + self.mox.StubOutWithMock(db, + 'virtual_interface_get_by_instance_and_network') + + db.fixed_ip_associate(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('192.168.0.1') + db.fixed_ip_update(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), + mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) + self.mox.ReplayAll() + + network = dict(networks[0]) + network['vpn_private_address'] = '192.168.0.2' + self.network.allocate_fixed_ip(None, 0, network, vpn=True) + + def test_allocate_fixed_ip(self): + self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') + self.mox.StubOutWithMock(db, 'fixed_ip_update') + self.mox.StubOutWithMock(db, + 'virtual_interface_get_by_instance_and_network') + + db.fixed_ip_associate_pool(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('192.168.0.1') + db.fixed_ip_update(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), + mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) + self.mox.ReplayAll() + + network = dict(networks[0]) + network['vpn_private_address'] = '192.168.0.2' + 
self.network.allocate_fixed_ip(None, 0, network) + + def test_create_networks_too_big(self): + self.assertRaises(ValueError, self.network.create_networks, None, + num_networks=4094, vlan_start=1) + + def test_create_networks_too_many(self): + self.assertRaises(ValueError, self.network.create_networks, None, + num_networks=100, vlan_start=1, + cidr='192.168.0.1/24', network_size=100) + + +class CommonNetworkTestCase(test.TestCase): + + class FakeNetworkManager(network_manager.NetworkManager): + """This NetworkManager doesn't call the base class so we can bypass all + inherited service cruft and just perform unit tests. + """ + + class FakeDB: + def fixed_ip_get_by_instance(self, context, instance_id): + return [dict(address='10.0.0.0'), dict(address='10.0.0.1'), + dict(address='10.0.0.2')] + + def __init__(self): + self.db = self.FakeDB() + self.deallocate_called = None + + def deallocate_fixed_ip(self, context, address): + self.deallocate_called = address + + def test_remove_fixed_ip_from_instance(self): + manager = self.FakeNetworkManager() + manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1') + + self.assertEquals(manager.deallocate_called, '10.0.0.1') + + def test_remove_fixed_ip_from_instance_bad_input(self): + manager = self.FakeNetworkManager() + self.assertRaises(exception.FixedIpNotFoundForSpecificInstance, + manager.remove_fixed_ip_from_instance, + None, 99, 'bad input') diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 0691231e4..69d2deafe 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -51,7 +51,7 @@ class QuotaTestCase(test.TestCase): self.manager = manager.AuthManager() self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('admin', 'admin', 'admin') - self.network = utils.import_object(FLAGS.network_manager) + self.network = self.network = self.start_service('network') self.context = context.RequestContext(project=self.project, 
user=self.user) @@ -69,7 +69,6 @@ class QuotaTestCase(test.TestCase): inst['project_id'] = self.project.id inst['instance_type_id'] = '3' # m1.large inst['vcpus'] = cores - inst['mac_address'] = utils.generate_mac() return db.instance_create(self.context, inst)['id'] def _create_volume(self, size=10): @@ -270,19 +269,16 @@ class QuotaTestCase(test.TestCase): for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) + @test.skip_test def test_too_many_addresses(self): address = '192.168.0.100' db.floating_ip_create(context.get_admin_context(), - {'address': address, 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.project.id) - # NOTE(vish): This assert never fails. When cloud attempts to - # make an rpc.call, the test just finishes with OK. It - # appears to be something in the magic inline callbacks - # that is breaking. + {'address': address, 'host': FLAGS.host, + 'project_id': self.project.id}) self.assertRaises(quota.QuotaError, - network.API().allocate_floating_ip, - self.context) + self.network.allocate_floating_ip, + self.context, + self.project.id) db.floating_ip_destroy(context.get_admin_context(), address) def test_too_many_metadata_items(self): diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 3a3f914e4..0c359e981 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -276,6 +276,19 @@ class GenericUtilsTestCase(test.TestCase): result = utils.parse_server_string('www.exa:mple.com:8443') self.assertEqual(('', ''), result) + def test_bool_from_str(self): + self.assertTrue(utils.bool_from_str('1')) + self.assertTrue(utils.bool_from_str('2')) + self.assertTrue(utils.bool_from_str('-1')) + self.assertTrue(utils.bool_from_str('true')) + self.assertTrue(utils.bool_from_str('True')) + self.assertTrue(utils.bool_from_str('tRuE')) + self.assertFalse(utils.bool_from_str('False')) + self.assertFalse(utils.bool_from_str('false')) + self.assertFalse(utils.bool_from_str('0')) 
+ self.assertFalse(utils.bool_from_str(None)) + self.assertFalse(utils.bool_from_str('junk')) + class IsUUIDLikeTestCase(test.TestCase): def assertUUIDLike(self, val, expected): diff --git a/nova/tests/test_vlan_network.py b/nova/tests/test_vlan_network.py deleted file mode 100644 index a1c8ab11c..000000000 --- a/nova/tests/test_vlan_network.py +++ /dev/null @@ -1,242 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Unit Tests for vlan network code -""" -import netaddr -import os - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import test -from nova import utils -from nova.auth import manager -from nova.tests.network import base -from nova.tests.network import binpath,\ - lease_ip, release_ip - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -class VlanNetworkTestCase(base.NetworkTestCase): - """Test cases for network code""" - def test_public_network_association(self): - """Makes sure that we can allocaate a public ip""" - # TODO(vish): better way of adding floating ips - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - pubnet = netaddr.IPNetwork(flags.FLAGS.floating_range) - address = str(list(pubnet)[0]) - try: - db.floating_ip_get_by_address(context.get_admin_context(), address) - except exception.NotFound: - db.floating_ip_create(context.get_admin_context(), - {'address': address, - 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.projects[0].id) - fix_addr = self._create_address(0) - lease_ip(fix_addr) - self.assertEqual(float_addr, str(pubnet[0])) - self.network.associate_floating_ip(self.context, float_addr, fix_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, float_addr) - self.network.disassociate_floating_ip(self.context, float_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - self.network.deallocate_floating_ip(self.context, float_addr) - self.network.deallocate_fixed_ip(self.context, fix_addr) - release_ip(fix_addr) - db.floating_ip_destroy(context.get_admin_context(), float_addr) - - def test_allocate_deallocate_fixed_ip(self): - """Makes sure that we can allocate and deallocate a fixed ip""" - 
address = self._create_address(0) - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - lease_ip(address) - self._deallocate_address(0, address) - - # Doesn't go away until it's dhcp released - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - - release_ip(address) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - def test_side_effects(self): - """Ensures allocating and releasing has no side effects""" - address = self._create_address(0) - address2 = self._create_address(1, self.instance2_id) - - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[1].id)) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[1].id)) - - # Addresses are allocated before they're issued - lease_ip(address) - lease_ip(address2) - - self._deallocate_address(0, address) - release_ip(address) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - # First address release shouldn't affect the second - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[1].id)) - - self._deallocate_address(1, address2) - release_ip(address2) - self.assertFalse(self._is_allocated_in_project(address2, - self.projects[1].id)) - - def test_subnet_edge(self): - """Makes sure that private ips don't overlap""" - first = self._create_address(0) - lease_ip(first) - instance_ids = [] - for i in range(1, FLAGS.num_networks): - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address2 = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - 
instance_ids.append(instance_ref['id']) - address3 = self._create_address(i, instance_ref['id']) - lease_ip(address) - lease_ip(address2) - lease_ip(address3) - self.context._project = self.projects[i] - self.context.project_id = self.projects[i].id - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - self.assertFalse(self._is_allocated_in_project(address2, - self.projects[0].id)) - self.assertFalse(self._is_allocated_in_project(address3, - self.projects[0].id)) - self.network.deallocate_fixed_ip(self.context, address) - self.network.deallocate_fixed_ip(self.context, address2) - self.network.deallocate_fixed_ip(self.context, address3) - release_ip(address) - release_ip(address2) - release_ip(address3) - for instance_id in instance_ids: - db.instance_destroy(context.get_admin_context(), instance_id) - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - self.network.deallocate_fixed_ip(self.context, first) - self._deallocate_address(0, first) - release_ip(first) - - def test_vpn_ip_and_port_looks_valid(self): - """Ensure the vpn ip and port are reasonable""" - self.assert_(self.projects[0].vpn_ip) - self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start) - self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start + - FLAGS.num_networks) - - def test_too_many_networks(self): - """Ensure error is raised if we run out of networks""" - projects = [] - networks_left = (FLAGS.num_networks - - db.network_count(context.get_admin_context())) - for i in range(networks_left): - project = self.manager.create_project('many%s' % i, self.user) - projects.append(project) - db.project_get_network(context.get_admin_context(), project.id) - project = self.manager.create_project('last', self.user) - projects.append(project) - self.assertRaises(db.NoMoreNetworks, - db.project_get_network, - context.get_admin_context(), - project.id) - for project in projects: - self.manager.delete_project(project) - - def 
test_ips_are_reused(self): - """Makes sure that ip addresses that are deallocated get reused""" - address = self._create_address(0) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address) - release_ip(address) - - address2 = self._create_address(0) - self.assertEqual(address, address2) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address2) - release_ip(address) - - def test_too_many_addresses(self): - """Test for a NoMoreAddresses exception when all fixed ips are used. - """ - admin_context = context.get_admin_context() - network = db.project_get_network(admin_context, self.projects[0].id) - num_available_ips = db.network_count_available_ips(admin_context, - network['id']) - addresses = [] - instance_ids = [] - for i in range(num_available_ips): - instance_ref = self._create_instance(0) - instance_ids.append(instance_ref['id']) - address = self._create_address(0, instance_ref['id']) - addresses.append(address) - lease_ip(address) - - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, 0) - self.assertRaises(db.NoMoreAddresses, - self.network.allocate_fixed_ip, - self.context, - 'foo') - - for i in range(num_available_ips): - self.network.deallocate_fixed_ip(self.context, addresses[i]) - release_ip(addresses[i]) - db.instance_destroy(context.get_admin_context(), instance_ids[i]) - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, num_available_ips) - - def _is_allocated_in_project(self, address, project_id): - """Returns true if address is in specified project""" - project_net = db.project_get_network(context.get_admin_context(), - project_id) - network = db.fixed_ip_get_network(context.get_admin_context(), - address) - instance = db.fixed_ip_get_instance(context.get_admin_context(), - address) - # instance exists until release - return instance is not None and network['id'] == 
project_net['id'] - - def run(self, result=None): - if(FLAGS.network_manager == 'nova.network.manager.VlanManager'): - super(VlanNetworkTestCase, self).run(result) diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index eddf01e9f..cbf7801cf 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -1,251 +1,276 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test suite for VMWareAPI.
-"""
-
-import stubout
-
-from nova import context
-from nova import db
-from nova import flags
-from nova import test
-from nova import utils
-from nova.auth import manager
-from nova.compute import power_state
-from nova.tests.glance import stubs as glance_stubs
-from nova.tests.vmwareapi import db_fakes
-from nova.tests.vmwareapi import stubs
-from nova.virt import vmwareapi_conn
-from nova.virt.vmwareapi import fake as vmwareapi_fake
-
-
-FLAGS = flags.FLAGS
-
-
-class VMWareAPIVMTestCase(test.TestCase):
- """Unit tests for Vmware API connection calls."""
-
- def setUp(self):
- super(VMWareAPIVMTestCase, self).setUp()
- self.flags(vmwareapi_host_ip='test_url',
- vmwareapi_host_username='test_username',
- vmwareapi_host_password='test_pass')
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.network = utils.import_object(FLAGS.network_manager)
- self.stubs = stubout.StubOutForTesting()
- vmwareapi_fake.reset()
- db_fakes.stub_out_db_instance_api(self.stubs)
- stubs.set_stubs(self.stubs)
- glance_stubs.stubout_glance_client(self.stubs)
- self.conn = vmwareapi_conn.get_connection(False)
-
- def _create_instance_in_the_db(self):
- values = {'name': 1,
- 'id': 1,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
- 'image_ref': "1",
- 'kernel_id': "1",
- 'ramdisk_id': "1",
- 'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
- self.instance = db.instance_create(None, values)
-
- def _create_vm(self):
- """Create and spawn the VM."""
- self._create_instance_in_the_db()
- self.type_data = db.instance_type_get_by_name(None, 'm1.large')
- self.conn.spawn(self.instance)
- self._check_vm_record()
-
- def _check_vm_record(self):
- """
- Check if the spawned VM's properties correspond to the instance in
- the db.
- """
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
-
- # Get Nova record for VM
- vm_info = self.conn.get_info(1)
-
- # Get record for VM
- vms = vmwareapi_fake._get_objects("VirtualMachine")
- vm = vms[0]
-
- # Check that m1.large above turned into the right thing.
- mem_kib = long(self.type_data['memory_mb']) << 10
- vcpus = self.type_data['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
- self.assertEquals(vm.get("summary.config.memorySizeMB"),
- self.type_data['memory_mb'])
-
- # Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
-
- # Check that the VM is running according to vSphere API.
- self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
-
- def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
- """
- Check if the get_info returned values correspond to the instance
- object in the db.
- """
- mem_kib = long(self.type_data['memory_mb']) << 10
- self.assertEquals(info["state"], pwr_state)
- self.assertEquals(info["max_mem"], mem_kib)
- self.assertEquals(info["mem"], mem_kib)
- self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
-
- def test_list_instances(self):
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 0)
-
- def test_list_instances_1(self):
- self._create_vm()
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
-
- def test_spawn(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_snapshot(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.snapshot(self.instance, "Test-Snapshot")
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_snapshot_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.snapshot, self.instance,
- "Test-Snapshot")
-
- def test_reboot(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.reboot(self.instance)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_reboot_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.reboot, self.instance)
-
- def test_reboot_not_poweredon(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
- self.assertRaises(Exception, self.conn.reboot, self.instance)
-
- def test_suspend(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
-
- def test_suspend_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.suspend, self.instance,
- self.dummy_callback_handler)
-
- def test_resume(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
- self.conn.resume(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_resume_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.resume, self.instance,
- self.dummy_callback_handler)
-
- def test_resume_not_suspended(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.assertRaises(Exception, self.conn.resume, self.instance,
- self.dummy_callback_handler)
-
- def test_get_info(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_destroy(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
- self.conn.destroy(self.instance)
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 0)
-
- def test_destroy_non_existent(self):
- self._create_instance_in_the_db()
- self.assertEquals(self.conn.destroy(self.instance), None)
-
- def test_pause(self):
- pass
-
- def test_unpause(self):
- pass
-
- def test_diagnostics(self):
- pass
-
- def test_get_console_output(self):
- pass
-
- def test_get_ajax_console(self):
- pass
-
- def dummy_callback_handler(self, ret):
- """
- Dummy callback function to be passed to suspend, resume, etc., calls.
- """
- pass
-
- def tearDown(self):
- super(VMWareAPIVMTestCase, self).tearDown()
- vmwareapi_fake.cleanup()
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- self.stubs.UnsetAll()
+# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test suite for VMWareAPI. +""" + +import stubout + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova import utils +from nova.auth import manager +from nova.compute import power_state +from nova.tests.glance import stubs as glance_stubs +from nova.tests.vmwareapi import db_fakes +from nova.tests.vmwareapi import stubs +from nova.virt import vmwareapi_conn +from nova.virt.vmwareapi import fake as vmwareapi_fake + + +FLAGS = flags.FLAGS + + +class VMWareAPIVMTestCase(test.TestCase): + """Unit tests for Vmware API connection calls.""" + + # NOTE(jkoelker): This is leaking stubs into the db module. + # Commenting out until updated for multi-nic. 
+ #def setUp(self): + # super(VMWareAPIVMTestCase, self).setUp() + # self.flags(vmwareapi_host_ip='test_url', + # vmwareapi_host_username='test_username', + # vmwareapi_host_password='test_pass') + # self.manager = manager.AuthManager() + # self.user = self.manager.create_user('fake', 'fake', 'fake', + # admin=True) + # self.project = self.manager.create_project('fake', 'fake', 'fake') + # self.network = utils.import_object(FLAGS.network_manager) + # self.stubs = stubout.StubOutForTesting() + # vmwareapi_fake.reset() + # db_fakes.stub_out_db_instance_api(self.stubs) + # stubs.set_stubs(self.stubs) + # glance_stubs.stubout_glance_client(self.stubs, + # glance_stubs.FakeGlance) + # self.conn = vmwareapi_conn.get_connection(False) + + #def tearDown(self): + # super(VMWareAPIVMTestCase, self).tearDown() + # vmwareapi_fake.cleanup() + # self.manager.delete_project(self.project) + # self.manager.delete_user(self.user) + # self.stubs.UnsetAll() + + def _create_instance_in_the_db(self): + values = {'name': 1, + 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': "1", + 'kernel_id': "1", + 'ramdisk_id': "1", + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + self.instance = db.instance_create(values) + + def _create_vm(self): + """Create and spawn the VM.""" + self._create_instance_in_the_db() + self.type_data = db.instance_type_get_by_name(None, 'm1.large') + self.conn.spawn(self.instance) + self._check_vm_record() + + def _check_vm_record(self): + """ + Check if the spawned VM's properties correspond to the instance in + the db. + """ + instances = self.conn.list_instances() + self.assertEquals(len(instances), 1) + + # Get Nova record for VM + vm_info = self.conn.get_info(1) + + # Get record for VM + vms = vmwareapi_fake._get_objects("VirtualMachine") + vm = vms[0] + + # Check that m1.large above turned into the right thing. 
+ mem_kib = long(self.type_data['memory_mb']) << 10 + vcpus = self.type_data['vcpus'] + self.assertEquals(vm_info['max_mem'], mem_kib) + self.assertEquals(vm_info['mem'], mem_kib) + self.assertEquals(vm.get("summary.config.numCpu"), vcpus) + self.assertEquals(vm.get("summary.config.memorySizeMB"), + self.type_data['memory_mb']) + + # Check that the VM is running according to Nova + self.assertEquals(vm_info['state'], power_state.RUNNING) + + # Check that the VM is running according to vSphere API. + self.assertEquals(vm.get("runtime.powerState"), 'poweredOn') + + def _check_vm_info(self, info, pwr_state=power_state.RUNNING): + """ + Check if the get_info returned values correspond to the instance + object in the db. + """ + mem_kib = long(self.type_data['memory_mb']) << 10 + self.assertEquals(info["state"], pwr_state) + self.assertEquals(info["max_mem"], mem_kib) + self.assertEquals(info["mem"], mem_kib) + self.assertEquals(info["num_cpu"], self.type_data['vcpus']) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_list_instances(self): + instances = self.conn.list_instances() + self.assertEquals(len(instances), 0) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_list_instances_1(self): + self._create_vm() + instances = self.conn.list_instances() + self.assertEquals(len(instances), 1) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_spawn(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_snapshot(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.snapshot(self.instance, "Test-Snapshot") + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + 
def test_snapshot_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.snapshot, self.instance, + "Test-Snapshot") + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_reboot(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.reboot(self.instance) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_reboot_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.reboot, self.instance) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_reboot_not_poweredon(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.suspend(self.instance, self.dummy_callback_handler) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.PAUSED) + self.assertRaises(Exception, self.conn.reboot, self.instance) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_suspend(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.suspend(self.instance, self.dummy_callback_handler) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.PAUSED) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_suspend_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.suspend, self.instance, + self.dummy_callback_handler) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_resume(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.suspend(self.instance, self.dummy_callback_handler) + info = 
self.conn.get_info(1) + self._check_vm_info(info, power_state.PAUSED) + self.conn.resume(self.instance, self.dummy_callback_handler) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_resume_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.resume, self.instance, + self.dummy_callback_handler) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_resume_not_suspended(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.assertRaises(Exception, self.conn.resume, self.instance, + self.dummy_callback_handler) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_get_info(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_destroy(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + instances = self.conn.list_instances() + self.assertEquals(len(instances), 1) + self.conn.destroy(self.instance) + instances = self.conn.list_instances() + self.assertEquals(len(instances), 0) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_destroy_non_existent(self): + self._create_instance_in_the_db() + self.assertEquals(self.conn.destroy(self.instance), None) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_pause(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_unpause(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_diagnostics(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def 
test_get_console_output(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_get_ajax_console(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def dummy_callback_handler(self, ret): + """ + Dummy callback function to be passed to suspend, resume, etc., calls. + """ + pass diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 4f10ee6af..c0f89601f 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -27,8 +27,10 @@ from nova import exception from nova import db from nova import flags from nova import log as logging +from nova import rpc from nova import test from nova import utils +from nova import volume FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.volume') @@ -43,6 +45,11 @@ class VolumeTestCase(test.TestCase): self.flags(connection_type='fake') self.volume = utils.import_object(FLAGS.volume_manager) self.context = context.get_admin_context() + self.instance_id = db.instance_create(self.context, {})['id'] + + def tearDown(self): + db.instance_destroy(self.context, self.instance_id) + super(VolumeTestCase, self).tearDown() @staticmethod def _create_volume(size='0', snapshot_id=None): @@ -127,7 +134,6 @@ class VolumeTestCase(test.TestCase): inst['user_id'] = 'fake' inst['project_id'] = 'fake' inst['instance_type_id'] = '2' # m1.tiny - inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 instance_id = db.instance_create(self.context, inst)['id'] mountpoint = "/dev/sdf" @@ -224,6 +230,30 @@ class VolumeTestCase(test.TestCase): snapshot_id) self.volume.delete_volume(self.context, volume_id) + def test_create_snapshot_force(self): + """Test snapshot in use can be created forcibly.""" + + def fake_cast(ctxt, topic, msg): + pass + self.stubs.Set(rpc, 'cast', fake_cast) + + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + db.volume_attached(self.context, volume_id, 
self.instance_id, + '/dev/sda1') + + volume_api = volume.api.API() + self.assertRaises(exception.ApiError, + volume_api.create_snapshot, + self.context, volume_id, + 'fake_name', 'fake_description') + snapshot_ref = volume_api.create_snapshot_force(self.context, + volume_id, + 'fake_name', + 'fake_description') + db.snapshot_destroy(self.context, snapshot_ref['id']) + db.volume_destroy(self.context, volume_id) + class DriverTestCase(test.TestCase): """Base Test class for Drivers.""" diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index d9a514745..199a8bc52 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -83,7 +83,6 @@ class XenAPIVolumeTestCase(test.TestCase): 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large - 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': 'linux', 'architecture': 'x86-64'} @@ -211,11 +210,24 @@ class XenAPIVMTestCase(test.TestCase): 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large - 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': 'linux', 'architecture': 'x86-64'} + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] instance = db.instance_create(self.context, values) - self.conn.spawn(instance) + self.conn.spawn(instance, network_info) gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id) gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id) @@ -320,22 +332,22 @@ class XenAPIVMTestCase(test.TestCase): if check_injection: xenstore_data = self.vm['xenstore_data'] - key = 'vm-data/networking/aabbccddeeff' + key = 'vm-data/networking/DEADBEEF0000' xenstore_value = xenstore_data[key] 
tcpip_data = ast.literal_eval(xenstore_value) self.assertEquals(tcpip_data, - {'label': 'fake_flat_network', - 'broadcast': '10.0.0.255', - 'ips': [{'ip': '10.0.0.3', - 'netmask':'255.255.255.0', - 'enabled':'1'}], - 'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff', - 'netmask': '120', - 'enabled': '1'}], - 'mac': 'aa:bb:cc:dd:ee:ff', - 'dns': ['10.0.0.2'], - 'gateway': '10.0.0.1', - 'gateway6': 'fe80::a00:1'}) + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00'}) def check_vm_params_for_windows(self): self.assertEquals(self.vm['platform']['nx'], 'true') @@ -369,6 +381,18 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(self.vm['HVM_boot_params'], {}) self.assertEquals(self.vm['HVM_boot_policy'], '') + def _list_vdis(self): + url = FLAGS.xenapi_connection_url + username = FLAGS.xenapi_connection_username + password = FLAGS.xenapi_connection_password + session = xenapi_conn.XenAPISession(url, username, password) + return session.call_xenapi('VDI.get_all') + + def _check_vdis(self, start_list, end_list): + for vdi_ref in end_list: + if not vdi_ref in start_list: + self.fail('Found unexpected VDI:%s' % vdi_ref) + def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", architecture="x86-64", instance_id=1, @@ -381,11 +405,24 @@ class XenAPIVMTestCase(test.TestCase): 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, 'instance_type_id': instance_type_id, - 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': os_type, 'architecture': architecture} instance = db.instance_create(self.context, values) - self.conn.spawn(instance) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 
'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + self.conn.spawn(instance, network_info) self.create_vm_record(self.conn, os_type, instance_id) self.check_vm_record(self.conn, check_injection) self.assertTrue(instance.os_type) @@ -397,6 +434,36 @@ class XenAPIVMTestCase(test.TestCase): self._test_spawn, 1, 2, 3, "4") # m1.xlarge + def test_spawn_fail_cleanup_1(self): + """Simulates an error while downloading an image. + + Verifies that VDIs created are properly cleaned up. + + """ + vdi_recs_start = self._list_vdis() + FLAGS.xenapi_image_service = 'glance' + stubs.stubout_fetch_image_glance_disk(self.stubs) + self.assertRaises(xenapi_fake.Failure, + self._test_spawn, 1, 2, 3) + # No additional VDI should be found. + vdi_recs_end = self._list_vdis() + self._check_vdis(vdi_recs_start, vdi_recs_end) + + def test_spawn_fail_cleanup_2(self): + """Simulates an error while creating VM record. + + It verifies that VDIs created are properly cleaned up. + + """ + vdi_recs_start = self._list_vdis() + FLAGS.xenapi_image_service = 'glance' + stubs.stubout_create_vm(self.stubs) + self.assertRaises(xenapi_fake.Failure, + self._test_spawn, 1, 2, 3) + # No additional VDI should be found. 
+ vdi_recs_end = self._list_vdis() + self._check_vdis(vdi_recs_start, vdi_recs_end) + def test_spawn_raw_objectstore(self): FLAGS.xenapi_image_service = 'objectstore' self._test_spawn(1, None, None) @@ -467,11 +534,11 @@ class XenAPIVMTestCase(test.TestCase): index = config.index('auto eth0') self.assertEquals(config[index + 1:index + 8], [ 'iface eth0 inet static', - 'address 10.0.0.3', + 'address 192.168.0.100', 'netmask 255.255.255.0', - 'broadcast 10.0.0.255', - 'gateway 10.0.0.1', - 'dns-nameservers 10.0.0.2', + 'broadcast 192.168.0.255', + 'gateway 192.168.0.1', + 'dns-nameservers 192.168.0.1', '']) self._tee_executed = True return '', '' @@ -532,23 +599,37 @@ class XenAPIVMTestCase(test.TestCase): # guest agent is detected self.assertFalse(self._tee_executed) + @test.skip_test("Never gets an address, not sure why") def test_spawn_vlanmanager(self): self.flags(xenapi_image_service='glance', network_manager='nova.network.manager.VlanManager', network_driver='nova.network.xenapi_net', vlan_interface='fake0') + + def dummy(*args, **kwargs): + pass + + self.stubs.Set(VMOps, 'create_vifs', dummy) # Reset network table xenapi_fake.reset_table('network') # Instance id = 2 will use vlan network (see db/fakes.py) - fake_instance_id = 2 + ctxt = self.context.elevated() + instance_ref = self._create_instance(2) network_bk = self.network # Ensure we use xenapi_net driver self.network = utils.import_object(FLAGS.network_manager) - self.network.setup_compute_network(None, fake_instance_id) + networks = self.network.db.network_get_all(ctxt) + for network in networks: + self.network.set_network_host(ctxt, network['id']) + + self.network.allocate_for_instance(ctxt, instance_id=instance_ref.id, + instance_type_id=1, project_id=self.project.id) + self.network.setup_compute_network(ctxt, instance_ref.id) self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK, - instance_id=fake_instance_id) + 
instance_id=instance_ref.id, + create_record=False) # TODO(salvatore-orlando): a complete test here would require # a check for making sure the bridge for the VM's VIF is # consistent with bridge specified in nova db @@ -560,13 +641,13 @@ class XenAPIVMTestCase(test.TestCase): vif_rec = xenapi_fake.get_record('VIF', vif_ref) self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit') self.assertEquals(vif_rec['qos_algorithm_params']['kbps'], - str(4 * 1024)) + str(3 * 1024)) def test_rescue(self): self.flags(xenapi_inject_image=False) instance = self._create_instance() conn = xenapi_conn.get_connection(False) - conn.rescue(instance, None) + conn.rescue(instance, None, []) def test_unrescue(self): instance = self._create_instance() @@ -582,22 +663,35 @@ class XenAPIVMTestCase(test.TestCase): self.vm = None self.stubs.UnsetAll() - def _create_instance(self): + def _create_instance(self, instance_id=1): """Creates and spawns a test instance.""" stubs.stubout_loopingcall_start(self.stubs) values = { - 'id': 1, + 'id': instance_id, 'project_id': self.project.id, 'user_id': self.user.id, 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large - 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': 'linux', 'architecture': 'x86-64'} instance = db.instance_create(self.context, values) - self.conn.spawn(instance) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + self.conn.spawn(instance, network_info) return instance @@ -669,7 +763,6 @@ class XenAPIMigrateInstance(test.TestCase): 'ramdisk_id': None, 'local_gb': 5, 'instance_type_id': '3', # m1.large - 'mac_address': 
'aa:bb:cc:dd:ee:ff', 'os_type': 'linux', 'architecture': 'x86-64'} @@ -695,7 +788,22 @@ class XenAPIMigrateInstance(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) stubs.stubout_loopingcall_start(self.stubs) conn = xenapi_conn.get_connection(False) - conn.finish_resize(instance, dict(base_copy='hurr', cow='durr')) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'), + network_info) class XenAPIDetermineDiskImageTestCase(test.TestCase): diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py index e132809dc..a943fee27 100644 --- a/nova/tests/test_zones.py +++ b/nova/tests/test_zones.py @@ -198,3 +198,178 @@ class ZoneManagerTestCase(test.TestCase): self.assertEquals(zone_state.attempt, 3) self.assertFalse(zone_state.is_active) self.assertEquals(zone_state.name, None) + + def test_host_service_caps_stale_no_stale_service(self): + zm = zone_manager.ZoneManager() + + # services just updated capabilities + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + self.assertFalse(zm.host_service_caps_stale("host1", "svc1")) + self.assertFalse(zm.host_service_caps_stale("host1", "svc2")) + + def test_host_service_caps_stale_all_stale_services(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Both services became stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + time_future = utils.utcnow() 
+ datetime.timedelta(seconds=expiry_time) + utils.set_time_override(time_future) + self.assertTrue(zm.host_service_caps_stale("host1", "svc1")) + self.assertTrue(zm.host_service_caps_stale("host1", "svc2")) + utils.clear_time_override() + + def test_host_service_caps_stale_one_stale_service(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # One service became stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + caps = zm.service_states["host1"]["svc1"] + caps["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + self.assertTrue(zm.host_service_caps_stale("host1", "svc1")) + self.assertFalse(zm.host_service_caps_stale("host1", "svc2")) + + def test_delete_expired_host_services_del_one_service(self): + zm = zone_manager.ZoneManager() + + # Delete one service in a host + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + stale_host_services = {"host1": ["svc1"]} + zm.delete_expired_host_services(stale_host_services) + self.assertFalse("svc1" in zm.service_states["host1"]) + self.assertTrue("svc2" in zm.service_states["host1"]) + + def test_delete_expired_host_services_del_all_hosts(self): + zm = zone_manager.ZoneManager() + + # Delete all services in a host + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + stale_host_services = {"host1": ["svc1", "svc2"]} + zm.delete_expired_host_services(stale_host_services) + self.assertFalse("host1" in zm.service_states) + + def test_delete_expired_host_services_del_one_service_per_host(self): + zm = zone_manager.ZoneManager() + + # Delete one service per host + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + stale_host_services = 
{"host1": ["svc1"], "host2": ["svc1"]} + zm.delete_expired_host_services(stale_host_services) + self.assertFalse("host1" in zm.service_states) + self.assertFalse("host2" in zm.service_states) + + def test_get_zone_capabilities_one_host(self): + zm = zone_manager.ZoneManager() + + # Service capabilities recent + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2))) + + def test_get_zone_capabilities_expired_host(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Service capabilities stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time) + utils.set_time_override(time_future) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, {}) + utils.clear_time_override() + + def test_get_zone_capabilities_multiple_hosts(self): + zm = zone_manager.ZoneManager() + + # Both host service capabilities recent + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4))) + + def test_get_zone_capabilities_one_stale_host(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # One host service capabilities become stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + serv_caps = zm.service_states["host1"]["svc1"] + serv_caps["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(3, 3), svc1_b=(4, 4))) + + def test_get_zone_capabilities_multiple_service_per_host(self): + zm = zone_manager.ZoneManager() + + # Multiple services per host + 
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6)) + zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8)) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4), + svc2_a=(5, 7), svc2_b=(6, 8))) + + def test_get_zone_capabilities_one_stale_service_per_host(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Two host services among four become stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6)) + zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8)) + serv_caps_1 = zm.service_states["host1"]["svc2"] + serv_caps_1["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + serv_caps_2 = zm.service_states["host2"]["svc1"] + serv_caps_2["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2), + svc2_a=(7, 7), svc2_b=(8, 8))) + + def test_get_zone_capabilities_three_stale_host_services(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Three host services among four become stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6)) + zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8)) + serv_caps_1 = zm.service_states["host1"]["svc2"] + serv_caps_1["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + serv_caps_2 = zm.service_states["host2"]["svc1"] + serv_caps_2["timestamp"] = utils.utcnow() - \ + 
datetime.timedelta(seconds=expiry_time) + serv_caps_3 = zm.service_states["host2"]["svc2"] + serv_caps_3["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2))) + + def test_get_zone_capabilities_all_stale_host_services(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # All the host services become stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6)) + zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8)) + time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time) + utils.set_time_override(time_future) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, {}) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 151a3e909..66c79d465 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -98,6 +98,42 @@ def stubout_is_vdi_pv(stubs): stubs.Set(vm_utils, '_is_vdi_pv', f) +def stubout_determine_is_pv_objectstore(stubs): + """Assumes VMs never have PV kernels""" + + @classmethod + def f(cls, *args): + return False + stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f) + + +def stubout_lookup_image(stubs): + """Simulates a failure in lookup image.""" + def f(_1, _2, _3, _4): + raise Exception("Test Exception raised by fake lookup_image") + stubs.Set(vm_utils, 'lookup_image', f) + + +def stubout_fetch_image_glance_disk(stubs): + """Simulates a failure in fetch image_glance_disk.""" + + @classmethod + def f(cls, *args): + raise fake.Failure("Test Exception raised by " + + "fake fetch_image_glance_disk") + stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk', f) + + +def stubout_create_vm(stubs): + """Simulates a failure in create_vm.""" + + @classmethod + def f(cls, 
*args): + raise fake.Failure("Test Exception raised by " + + "fake create_vm") + stubs.Set(vm_utils.VMHelper, 'create_vm', f) + + def stubout_loopingcall_start(stubs): def fake_start(self, interval, now=True): self.f(*self.args, **self.kw) @@ -120,6 +156,9 @@ class FakeSessionForVMTests(fake.SessionBase): super(FakeSessionForVMTests, self).__init__(uri) def host_call_plugin(self, _1, _2, plugin, method, _5): + # If the call is for 'copy_kernel_vdi' return None. + if method == 'copy_kernel_vdi': + return sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) diff --git a/nova/utils.py b/nova/utils.py index 918541224..8784a227d 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -262,14 +262,6 @@ def generate_uid(topic, size=8): return '%s-%s' % (topic, ''.join(choices)) -def generate_mac(): - mac = [0x02, 0x16, 0x3e, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: '%02x' % x, mac)) - - # Default symbols to use for passwords. Avoids visually confusing characters. # ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ('23456789' # Removed: 0,1 @@ -282,6 +274,22 @@ EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O +def usage_from_instance(instance_ref, **kw): + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['id'], + instance_type=instance_ref['instance_type']['name'], + instance_type_id=instance_ref['instance_type_id'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + launched_at=str(instance_ref['launched_at']) \ + if instance_ref['launched_at'] else '', + image_ref=instance_ref['image_ref']) + usage_info.update(kw) + return usage_info + + def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS): """Generate a random password from the supplied symbols. 
@@ -756,6 +764,17 @@ def is_uuid_like(val): return (len(val) == 36) and (val.count('-') == 4) +def bool_from_str(val): + """Convert a string representation of a bool into a bool value""" + + if not val: + return False + try: + return True if int(val) else False + except ValueError: + return val.lower() == 'true' + + class Bootstrapper(object): """Provides environment bootstrapping capabilities for entry points.""" diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 2c7c0cfcc..34dc5f544 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -61,11 +61,11 @@ class ComputeDriver(object): """Return a list of InstanceInfo for all registered VMs""" raise NotImplementedError() - def spawn(self, instance, network_info=None, block_device_mapping=None): + def spawn(self, instance, network_info, block_device_mapping=None): """Launch a VM for the specified instance""" raise NotImplementedError() - def destroy(self, instance, cleanup=True): + def destroy(self, instance, network_info, cleanup=True): """Destroy (shutdown and delete) the specified instance. 
The given parameter is an instance of nova.compute.service.Instance, @@ -81,7 +81,7 @@ class ComputeDriver(object): """ raise NotImplementedError() - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot specified VM""" raise NotImplementedError() @@ -146,11 +146,11 @@ class ComputeDriver(object): """resume the specified instance""" raise NotImplementedError() - def rescue(self, instance, callback): + def rescue(self, instance, callback, network_info): """Rescue the specified instance""" raise NotImplementedError() - def unrescue(self, instance, callback): + def unrescue(self, instance, callback, network_info): """Unrescue the specified instance""" raise NotImplementedError() @@ -197,7 +197,7 @@ class ComputeDriver(object): def reset_network(self, instance): """reset networking for specified instance""" - raise NotImplementedError() + pass def ensure_filtering_rules_for_instance(self, instance_ref): """Setting up filtering rules and waiting for its completion. 
@@ -224,7 +224,7 @@ class ComputeDriver(object): """ raise NotImplementedError() - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() @@ -242,10 +242,18 @@ class ComputeDriver(object): """Update agent on the VM instance.""" raise NotImplementedError() - def inject_network_info(self, instance): + def inject_network_info(self, instance, nw_info): """inject network info for specified instance""" - raise NotImplementedError() + pass def poll_rescued_instances(self, timeout): """Poll for rescued instances""" raise NotImplementedError() + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + raise NotImplementedError() + + def plug_vifs(self, instance, network_info): + """Plugs in VIFs to networks.""" + raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index f78c29bd0..26bc421c0 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -129,7 +129,7 @@ class FakeConnection(driver.ComputeDriver): info_list.append(self._map_to_instance_info(instance)) return info_list - def spawn(self, instance, network_info=None, block_device_mapping=None): + def spawn(self, instance, network_info, block_device_mapping=None): """ Create a new instance/VM/domain on the virtualization platform. @@ -167,7 +167,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def reboot(self, instance): + def reboot(self, instance, network_info): """ Reboot the specified instance. @@ -240,13 +240,13 @@ class FakeConnection(driver.ComputeDriver): """ pass - def rescue(self, instance): + def rescue(self, instance, callback, network_info): """ Rescue the specified instance. """ pass - def unrescue(self, instance): + def unrescue(self, instance, callback, network_info): """ Unrescue the specified instance. 
""" @@ -293,7 +293,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def destroy(self, instance): + def destroy(self, instance, network_info): key = instance.name if key in self.instances: del self.instances[key] @@ -499,7 +499,7 @@ class FakeConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" return - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info=None): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') @@ -514,3 +514,7 @@ class FakeConnection(driver.ComputeDriver): def get_host_stats(self, refresh=False): """Return fake Host Status of ram, disk, network.""" return self.host_status + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + pass diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 772e7eb59..81c7dea58 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver): return instance_infos - def spawn(self, instance, network_info=None, block_device_mapping=None): + def spawn(self, instance, network_info, block_device_mapping=None): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: @@ -157,7 +157,12 @@ class HyperVConnection(driver.ComputeDriver): self._create_vm(instance) self._create_disk(instance['name'], vhdfile) - self._create_nic(instance['name'], instance['mac_address']) + + mac_address = None + if instance['mac_addresses']: + mac_address = instance['mac_addresses'][0]['address'] + + self._create_nic(instance['name'], mac_address) LOG.debug(_('Starting VM %s '), instance.name) self._set_vm_state(instance['name'], 'Enabled') @@ -363,14 +368,14 @@ class HyperVConnection(driver.ComputeDriver): wmi_obj.Properties_.Item(prop).Value return newinst - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot 
the specified instance.""" vm = self._lookup(instance.name) if vm is None: raise exception.InstanceNotFound(instance_id=instance.id) self._set_vm_state(instance.name, 'Reboot') - def destroy(self, instance): + def destroy(self, instance, network_info): """Destroy the VM. Also destroy the associated VHD disk files""" LOG.debug(_("Got request to destroy vm %s"), instance.name) vm = self._lookup(instance.name) @@ -494,3 +499,7 @@ class HyperVConnection(driver.ComputeDriver): def get_host_stats(self, refresh=False): """See xenapi_conn.py implementation.""" pass + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + pass diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index e1a683da8..a75636390 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -82,9 +82,13 @@ </disk> #end if #for $vol in $volumes - <disk type='block'> + <disk type='${vol.type}'> <driver type='raw'/> + #if $vol.type == 'network' + <source protocol='${vol.protocol}' name='${vol.name}'/> + #else <source dev='${vol.device_path}'/> + #end if <target dev='${vol.mount_device}' bus='${disk_bus}'/> </disk> #end for @@ -92,6 +96,22 @@ #end if #for $nic in $nics + #if $vif_type == 'ethernet' + <interface type='ethernet'> + <target dev='${nic.name}' /> + <mac address='${nic.mac_address}' /> + <script path='${nic.script}' /> + </interface> + #else if $vif_type == '802.1Qbh' + <interface type='direct'> + <mac address='${nic.mac_address}'/> + <source dev='${nic.device_name}' mode='private'/> + <virtualport type='802.1Qbh'> + <parameters profileid='${nic.profile_name}'/> + </virtualport> + <model type='virtio'/> + </interface> + #else <interface type='bridge'> <source bridge='${nic.bridge_name}'/> <mac address='${nic.mac_address}'/> @@ -107,6 +127,8 @@ #end if </filterref> </interface> + #end if + #end for <!-- The order is significant here. 
File must be defined first --> <serial type="file"> diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index fadf77629..96f9c41f9 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -123,6 +123,11 @@ flags.DEFINE_string('qemu_img', 'qemu-img', 'binary to use for qemu-img commands') flags.DEFINE_bool('start_guests_on_host_boot', False, 'Whether to restart guests when the host reboots') +flags.DEFINE_string('libvirt_vif_type', 'bridge', + 'Type of VIF to create.') +flags.DEFINE_string('libvirt_vif_driver', + 'nova.virt.libvirt.vif.LibvirtBridgeDriver', + 'The libvirt VIF driver to configure the VIFs.') def get_connection(read_only): @@ -165,6 +170,7 @@ class LibvirtConnection(driver.ComputeDriver): fw_class = utils.import_class(FLAGS.firewall_driver) self.firewall_driver = fw_class(get_connection=self._get_connection) + self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver) def init_host(self, host): # Adopt existing VM's running here @@ -256,7 +262,12 @@ class LibvirtConnection(driver.ComputeDriver): infos.append(info) return infos - def destroy(self, instance, cleanup=True): + def plug_vifs(self, instance, network_info): + """Plugin VIFs into networks.""" + for (network, mapping) in network_info: + self.vif_driver.plug(instance, network, mapping) + + def destroy(self, instance, network_info, cleanup=True): instance_name = instance['name'] try: @@ -300,6 +311,9 @@ class LibvirtConnection(driver.ComputeDriver): locals()) raise + for (network, mapping) in network_info: + self.vif_driver.unplug(instance, network, mapping) + def _wait_for_destroy(): """Called at an interval until the VM is gone.""" instance_name = instance['name'] @@ -314,7 +328,8 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_destroy) timer.start(interval=0.5, now=True) - self.firewall_driver.unfilter_instance(instance) + self.firewall_driver.unfilter_instance(instance, + 
network_info=network_info) if cleanup: self._cleanup(instance) @@ -331,25 +346,24 @@ class LibvirtConnection(driver.ComputeDriver): if os.path.exists(target): shutil.rmtree(target) - @exception.wrap_exception + @exception.wrap_exception() def attach_volume(self, instance_name, device_path, mountpoint): virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] - if device_path.startswith('/dev/'): + (type, protocol, name) = \ + self._get_volume_device_info(vol['device_path']) + if type == 'block': xml = """<disk type='block'> <driver name='qemu' type='raw'/> <source dev='%s'/> <target dev='%s' bus='virtio'/> </disk>""" % (device_path, mount_device) - elif ':' in device_path: - (protocol, name) = device_path.split(':') + elif type == 'network': xml = """<disk type='network'> <driver name='qemu' type='raw'/> <source protocol='%s' name='%s'/> <target dev='%s' bus='virtio'/> - </disk>""" % (protocol, - name, - mount_device) + </disk>""" % (protocol, name, mount_device) else: raise exception.InvalidDevicePath(path=device_path) @@ -375,7 +389,7 @@ class LibvirtConnection(driver.ComputeDriver): if doc is not None: doc.freeDoc() - @exception.wrap_exception + @exception.wrap_exception() def detach_volume(self, instance_name, mountpoint): virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] @@ -384,7 +398,7 @@ class LibvirtConnection(driver.ComputeDriver): raise exception.DiskNotFound(location=mount_device) virt_dom.detachDevice(xml) - @exception.wrap_exception + @exception.wrap_exception() def snapshot(self, instance, image_href): """Create snapshot from a running VM instance. @@ -460,8 +474,8 @@ class LibvirtConnection(driver.ComputeDriver): # Clean up shutil.rmtree(temp_dir) - @exception.wrap_exception - def reboot(self, instance): + @exception.wrap_exception() + def reboot(self, instance, network_info): """Reboot a virtual machine, given an instance reference. 
This method actually destroys and re-creates the domain to ensure the @@ -476,7 +490,8 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is # better because we cannot ensure flushing dirty buffers # in the guest OS. But, in case of KVM, shutdown() does not work... - self.destroy(instance, False) + self.destroy(instance, network_info, cleanup=False) + self.plug_vifs(instance, network_info) self.firewall_driver.setup_basic_filtering(instance) self.firewall_driver.prepare_instance_filter(instance) self._create_new_domain(xml) @@ -501,32 +516,32 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) - @exception.wrap_exception + @exception.wrap_exception() def pause(self, instance, callback): """Pause VM instance""" dom = self._lookup_by_name(instance.name) dom.suspend() - @exception.wrap_exception + @exception.wrap_exception() def unpause(self, instance, callback): """Unpause paused VM instance""" dom = self._lookup_by_name(instance.name) dom.resume() - @exception.wrap_exception + @exception.wrap_exception() def suspend(self, instance, callback): """Suspend the specified instance""" dom = self._lookup_by_name(instance.name) dom.managedSave(0) - @exception.wrap_exception + @exception.wrap_exception() def resume(self, instance, callback): """resume the specified instance""" dom = self._lookup_by_name(instance.name) dom.create() - @exception.wrap_exception - def rescue(self, instance): + @exception.wrap_exception() + def rescue(self, instance, callback, network_info): """Loads a VM using rescue images. A rescue is normally performed when something goes wrong with the @@ -535,7 +550,7 @@ class LibvirtConnection(driver.ComputeDriver): data recovery. 
""" - self.destroy(instance, False) + self.destroy(instance, network_info, cleanup=False) xml = self.to_xml(instance, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, @@ -563,24 +578,24 @@ class LibvirtConnection(driver.ComputeDriver): timer = utils.LoopingCall(_wait_for_rescue) return timer.start(interval=0.5, now=True) - @exception.wrap_exception - def unrescue(self, instance): + @exception.wrap_exception() + def unrescue(self, instance, network_info): """Reboot the VM which is being rescued back into primary images. Because reboot destroys and re-creates instances, unresue should simply call reboot. """ - self.reboot(instance) + self.reboot(instance, network_info) - @exception.wrap_exception + @exception.wrap_exception() def poll_rescued_instances(self, timeout): pass # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) - @exception.wrap_exception - def spawn(self, instance, network_info=None, block_device_mapping=None): + @exception.wrap_exception() + def spawn(self, instance, network_info, block_device_mapping=None): xml = self.to_xml(instance, False, network_info=network_info, block_device_mapping=block_device_mapping) block_device_mapping = block_device_mapping or [] @@ -642,7 +657,7 @@ class LibvirtConnection(driver.ComputeDriver): LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals()) return contents - @exception.wrap_exception + @exception.wrap_exception() def get_console_output(self, instance): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') @@ -663,7 +678,7 @@ class LibvirtConnection(driver.ComputeDriver): return self._dump_file(fpath) - @exception.wrap_exception + @exception.wrap_exception() def get_ajax_console(self, instance): def get_open_port(): start_port, end_port = FLAGS.ajaxterm_portrange.split("-") @@ -704,7 +719,7 @@ class LibvirtConnection(driver.ComputeDriver): def get_host_ip_addr(self): return FLAGS.my_ip - @exception.wrap_exception + 
@exception.wrap_exception() def get_vnc_console(self, instance): def get_vnc_port_for_instance(instance_name): virt_dom = self._lookup_by_name(instance_name) @@ -771,8 +786,6 @@ class LibvirtConnection(driver.ComputeDriver): def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, network_info=None, block_device_mapping=None): block_device_mapping = block_device_mapping or [] - if not network_info: - network_info = netutils.get_network_info(inst) if not suffix: suffix = '' @@ -881,18 +894,23 @@ class LibvirtConnection(driver.ComputeDriver): have_injected_networks = True address = mapping['ips'][0]['ip'] + netmask = mapping['ips'][0]['netmask'] address_v6 = None + gateway_v6 = None + netmask_v6 = None if FLAGS.use_ipv6: address_v6 = mapping['ip6s'][0]['ip'] + netmask_v6 = mapping['ip6s'][0]['netmask'] + gateway_v6 = mapping['gateway6'] net_info = {'name': 'eth%d' % ifc_num, 'address': address, - 'netmask': network_ref['netmask'], - 'gateway': network_ref['gateway'], - 'broadcast': network_ref['broadcast'], - 'dns': network_ref['dns'], + 'netmask': netmask, + 'gateway': mapping['gateway'], + 'broadcast': mapping['broadcast'], + 'dns': mapping['dns'], 'address_v6': address_v6, - 'gateway_v6': network_ref['gateway_v6'], - 'netmask_v6': network_ref['netmask_v6']} + 'gateway6': gateway_v6, + 'netmask_v6': netmask_v6} nets.append(net_info) if have_injected_networks: @@ -926,40 +944,6 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.libvirt_type == 'uml': utils.execute('sudo', 'chown', 'root', basepath('disk')) - def _get_nic_for_xml(self, network, mapping): - # Assume that the gateway also acts as the dhcp server. 
- dhcp_server = network['gateway'] - gateway_v6 = network['gateway_v6'] - mac_id = mapping['mac'].replace(':', '') - - if FLAGS.allow_project_net_traffic: - template = "<parameter name=\"%s\"value=\"%s\" />\n" - net, mask = netutils.get_net_and_mask(network['cidr']) - values = [("PROJNET", net), ("PROJMASK", mask)] - if FLAGS.use_ipv6: - net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( - network['cidr_v6']) - values.extend([("PROJNETV6", net_v6), - ("PROJMASKV6", prefixlen_v6)]) - - extra_params = "".join([template % value for value in values]) - else: - extra_params = "\n" - - result = { - 'id': mac_id, - 'bridge_name': network['bridge'], - 'mac_address': mapping['mac'], - 'ip_address': mapping['ips'][0]['ip'], - 'dhcp_server': dhcp_server, - 'extra_params': extra_params, - } - - if gateway_v6: - result['gateway_v6'] = gateway_v6 + "/128" - - return result - root_mount_device = 'vda' # FIXME for now. it's hard coded. local_mount_device = 'vdb' # FIXME for now. it's hard coded. @@ -971,6 +955,16 @@ class LibvirtConnection(driver.ComputeDriver): return True return False + @exception.wrap_exception + def _get_volume_device_info(self, device_path): + if device_path.startswith('/dev/'): + return ('block', None, None) + elif ':' in device_path: + (protocol, name) = device_path.split(':') + return ('network', protocol, name) + else: + raise exception.InvalidDevicePath(path=device_path) + def _prepare_xml_info(self, instance, rescue=False, network_info=None, block_device_mapping=None): block_device_mapping = block_device_mapping or [] @@ -981,7 +975,7 @@ class LibvirtConnection(driver.ComputeDriver): nics = [] for (network, mapping) in network_info: - nics.append(self._get_nic_for_xml(network, mapping)) + nics.append(self.vif_driver.plug(instance, network, mapping)) # FIXME(vish): stick this in db inst_type_id = instance['instance_type_id'] inst_type = instance_types.get_instance_type(inst_type_id) @@ -993,6 +987,9 @@ class LibvirtConnection(driver.ComputeDriver): 
for vol in block_device_mapping: vol['mount_device'] = _strip_dev(vol['mount_device']) + (vol['type'], vol['protocol'], vol['name']) = \ + self._get_volume_device_info(vol['device_path']) + ebs_root = self._volume_in_mapping(self.root_mount_device, block_device_mapping) if self._volume_in_mapping(self.local_mount_device, @@ -1010,14 +1007,14 @@ class LibvirtConnection(driver.ComputeDriver): 'rescue': rescue, 'local': local_gb, 'driver_type': driver_type, + 'vif_type': FLAGS.libvirt_vif_type, 'nics': nics, 'ebs_root': ebs_root, 'volumes': block_device_mapping} - if FLAGS.vnc_enabled: - if FLAGS.libvirt_type != 'lxc': - xml_info['vncserver_host'] = FLAGS.vncserver_host - xml_info['vnc_keymap'] = FLAGS.vnc_keymap + if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'): + xml_info['vncserver_host'] = FLAGS.vncserver_host + xml_info['vnc_keymap'] = FLAGS.vnc_keymap if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" @@ -1580,9 +1577,10 @@ class LibvirtConnection(driver.ComputeDriver): timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info): """See comments of same method in firewall_driver.""" - self.firewall_driver.unfilter_instance(instance_ref) + self.firewall_driver.unfilter_instance(instance_ref, + network_info=network_info) def update_host_status(self): """See xenapi_conn.py implementation.""" @@ -1591,3 +1589,7 @@ class LibvirtConnection(driver.ComputeDriver): def get_host_stats(self, refresh=False): """See xenapi_conn.py implementation.""" pass + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + pass diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index b99f2ffb0..9ce57b6c9 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -46,7 +46,7 @@ class FirewallDriver(object): At 
this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): """Stop filtering instance""" raise NotImplementedError() @@ -300,9 +300,10 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): """Clear out the nwfilter rules.""" - network_info = netutils.get_network_info(instance) + if not network_info: + network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -542,11 +543,11 @@ class IptablesFirewallDriver(FirewallDriver): """No-op. Everything is done in prepare_instance_filter""" pass - def unfilter_instance(self, instance): + def unfilter_instance(self, instance, network_info=None): if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() - self.nwfilter.unfilter_instance(instance) + self.nwfilter.unfilter_instance(instance, network_info) else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) @@ -620,7 +621,7 @@ class IptablesFirewallDriver(FirewallDriver): ipv4_rules += ['-j $provider'] ipv6_rules += ['-j $provider'] - dhcp_servers = [network['gateway'] for (network, _m) in network_info] + dhcp_servers = [info['gateway'] for (_n, info) in network_info] for dhcp_server in dhcp_servers: ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' @@ -637,7 +638,7 @@ class IptablesFirewallDriver(FirewallDriver): # they're not worth the clutter. 
if FLAGS.use_ipv6: # Allow RA responses - gateways_v6 = [network['gateway_v6'] for (network, _m) in + gateways_v6 = [mapping['gateway6'] for (_n, mapping) in network_info] for gateway_v6 in gateways_v6: ipv6_rules.append( @@ -645,8 +646,8 @@ class IptablesFirewallDriver(FirewallDriver): #Allow project network traffic if FLAGS.allow_project_net_traffic: - cidrv6s = [network['cidr_v6'] for (network, _m) - in network_info] + cidrv6s = [network['cidr_v6'] for (network, _m) in + network_info] for cidrv6 in cidrv6s: ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index 0bad84f7c..041eacb2d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -49,31 +49,36 @@ def get_ip_version(cidr): def get_network_info(instance): + # TODO(tr3buchet): this function needs to go away! network info + # MUST be passed down from compute # TODO(adiantum) If we will keep this function # we should cache network_info admin_context = context.get_admin_context() - ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, - instance['id']) + fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id']) + vifs = db.virtual_interface_get_by_instance(admin_context, instance['id']) networks = db.network_get_all_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get_by_id(admin_context, + flavor = db.instance_type_get(admin_context, instance['instance_type_id']) network_info = [] - for network in networks: - network_ips = [ip for ip in ip_addresses - if ip['network_id'] == network['id']] + for vif in vifs: + network = vif['network'] + + # determine which of the instance's IPs belong to this network + network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if + fixed_ip['network_id'] == network['id']] def ip_dict(ip): return { - 'ip': ip['address'], + 'ip': ip, 'netmask': network['netmask'], 'enabled': '1'} def ip6_dict(): prefix = network['cidr_v6'] - mac = 
instance['mac_address'] + mac = vif['address'] project_id = instance['project_id'] return { 'ip': ipv6.to_global(prefix, mac, project_id), @@ -84,11 +89,16 @@ def get_network_info(instance): 'label': network['label'], 'gateway': network['gateway'], 'broadcast': network['broadcast'], - 'mac': instance['mac_address'], + 'mac': vif['address'], 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [network['dns']], + 'dns': [], 'ips': [ip_dict(ip) for ip in network_ips]} + if network['dns1']: + mapping['dns'].append(network['dns1']) + if network['dns2']: + mapping['dns'].append(network['dns2']) + if FLAGS.use_ipv6: mapping['ip6s'] = [ip6_dict()] mapping['gateway6'] = network['gateway_v6'] diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py new file mode 100644 index 000000000..24d45d1a7 --- /dev/null +++ b/nova/virt/libvirt/vif.py @@ -0,0 +1,134 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# Copyright (C) 2011 Nicira, Inc +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""VIF drivers for libvirt.""" + +from nova import flags +from nova import log as logging +from nova.network import linux_net +from nova.virt.libvirt import netutils +from nova import utils +from nova.virt.vif import VIFDriver + +LOG = logging.getLogger('nova.virt.libvirt.vif') + +FLAGS = flags.FLAGS + +flags.DEFINE_string('libvirt_ovs_bridge', 'br-int', + 'Name of Integration Bridge used by Open vSwitch') + + +class LibvirtBridgeDriver(VIFDriver): + """VIF driver for Linux bridge.""" + + def _get_configurations(self, network, mapping): + """Get a dictionary of VIF configurations for bridge type.""" + # Assume that the gateway also acts as the dhcp server. + gateway6 = mapping.get('gateway6') + mac_id = mapping['mac'].replace(':', '') + + if FLAGS.allow_project_net_traffic: + template = "<parameter name=\"%s\"value=\"%s\" />\n" + net, mask = netutils.get_net_and_mask(network['cidr']) + values = [("PROJNET", net), ("PROJMASK", mask)] + if FLAGS.use_ipv6: + net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( + network['cidr_v6']) + values.extend([("PROJNETV6", net_v6), + ("PROJMASKV6", prefixlen_v6)]) + + extra_params = "".join([template % value for value in values]) + else: + extra_params = "\n" + + result = { + 'id': mac_id, + 'bridge_name': network['bridge'], + 'mac_address': mapping['mac'], + 'ip_address': mapping['ips'][0]['ip'], + 'dhcp_server': mapping['dhcp_server'], + 'extra_params': extra_params, + } + + if gateway6: + result['gateway6'] = gateway6 + "/128" + + return result + + def plug(self, instance, network, mapping): + """Ensure that the bridge exists, and add VIF to it.""" + if (not network.get('multi_host') and + mapping.get('should_create_bridge')): + if mapping.get('should_create_vlan'): + LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), + {'vlan': network['vlan'], + 'bridge': network['bridge']}) + linux_net.ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface']) + else: + LOG.debug(_("Ensuring 
bridge %s"), network['bridge']) + linux_net.ensure_bridge(network['bridge'], + network['bridge_interface']) + + return self._get_configurations(network, mapping) + + def unplug(self, instance, network, mapping): + """No manual unplugging required.""" + pass + + +class LibvirtOpenVswitchDriver(VIFDriver): + """VIF driver for Open vSwitch.""" + + def plug(self, instance, network, mapping): + vif_id = str(instance['id']) + "-" + str(network['id']) + dev = "tap-%s" % vif_id + iface_id = "nova-" + vif_id + if not linux_net._device_exists(dev): + utils.execute('sudo', 'ip', 'tuntap', 'add', dev, 'mode', 'tap') + utils.execute('sudo', 'ip', 'link', 'set', dev, 'up') + utils.execute('sudo', 'ovs-vsctl', '--', '--may-exist', 'add-port', + FLAGS.libvirt_ovs_bridge, dev, + '--', 'set', 'Interface', dev, + "external-ids:iface-id=%s" % iface_id, + '--', 'set', 'Interface', dev, + "external-ids:iface-status=active", + '--', 'set', 'Interface', dev, + "external-ids:attached-mac=%s" % mapping['mac']) + + result = { + 'script': '', + 'name': dev, + 'mac_address': mapping['mac']} + return result + + def unplug(self, instance, network, mapping): + """Unplug the VIF from the network by deleting the port from + the bridge.""" + vif_id = str(instance['id']) + "-" + str(network['id']) + dev = "tap-%s" % vif_id + try: + utils.execute('sudo', 'ovs-vsctl', 'del-port', + FLAGS.flat_network_bridge, dev) + utils.execute('sudo', 'ip', 'link', 'delete', dev) + except: + LOG.warning(_("Failed while unplugging vif of instance '%s'"), + instance['name']) + raise diff --git a/nova/virt/vif.py b/nova/virt/vif.py new file mode 100644 index 000000000..b78689957 --- /dev/null +++ b/nova/virt/vif.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF module common to all virt layers.""" + + +class VIFDriver(object): + """Abstract class that defines generic interfaces for all VIF drivers.""" + + def plug(self, instance, network, mapping): + """Plug VIF into network.""" + raise NotImplementedError() + + def unplug(self, instance, network, mapping): + """Unplug VIF from network.""" + raise NotImplementedError() diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_utils.py index e77842535..08e3bf0b1 100644 --- a/nova/virt/vmwareapi/network_utils.py +++ b/nova/virt/vmwareapi/network_utils.py @@ -45,10 +45,30 @@ def get_network_with_the_name(session, network_name="vmnet0"): networks = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"Network", vm_networks, ["summary.name"])
- for network in networks:
- if network.propSet[0].val == network_name:
- return network.obj
- return None
+ network_obj = {}
+ for network in vm_networks:
+ # Get network properties
+ if network._type == 'DistributedVirtualPortgroup':
+ props = session._call_method(vim_util,
+ "get_dynamic_property", network,
+ "DistributedVirtualPortgroup", "config")
+ # NOTE(asomya): This only works on ESXi if the port binding is
+ # set to ephemeral
+ if props.name == network_name:
+ network_obj['type'] = 'DistributedVirtualPortgroup'
+ network_obj['dvpg'] = props.key
+ network_obj['dvsw'] = props.distributedVirtualSwitch.value
+ else:
+ props = session._call_method(vim_util,
+ "get_dynamic_property", network,
+ "Network", "summary.name")
+ if props == network_name:
+ network_obj['type'] = 'Network'
+ network_obj['name'] = network_name
+ if (len(network_obj) > 0):
+ return network_obj
+ else:
+ return None
def get_vswitch_for_vlan_interface(session, vlan_interface):
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py new file mode 100644 index 000000000..b3e43b209 --- /dev/null +++ b/nova/virt/vmwareapi/vif.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF drivers for VMWare.""" + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils +from nova.virt.vif import VIFDriver +from nova.virt.vmwareapi_conn import VMWareAPISession +from nova.virt.vmwareapi import network_utils + + +LOG = logging.getLogger("nova.virt.vmwareapi.vif") + +FLAGS = flags.FLAGS + + +class VMWareVlanBridgeDriver(VIFDriver): + """VIF Driver to setup bridge/VLAN networking using VMWare API.""" + + def plug(self, instance, network, mapping): + """Create a vlan and bridge unless they already exist.""" + vlan_num = network['vlan'] + bridge = network['bridge'] + bridge_interface = network['bridge_interface'] + + # Open vmwareapi session + host_ip = FLAGS.vmwareapi_host_ip + host_username = FLAGS.vmwareapi_host_username + host_password = FLAGS.vmwareapi_host_password + if not host_ip or host_username is None or host_password is None: + raise Exception(_('Must specify vmwareapi_host_ip, ' + 'vmwareapi_host_username ' + 'and vmwareapi_host_password to use ' + 'connection_type=vmwareapi')) + session = VMWareAPISession(host_ip, 
host_username, host_password, + FLAGS.vmwareapi_api_retry_count) + vlan_interface = bridge_interface + # Check if the vlan_interface physical network adapter exists on the + # host. + if not network_utils.check_if_vlan_interface_exists(session, + vlan_interface): + raise exception.NetworkAdapterNotFound(adapter=vlan_interface) + + # Get the vSwitch associated with the Physical Adapter + vswitch_associated = network_utils.get_vswitch_for_vlan_interface( + session, vlan_interface) + if vswitch_associated is None: + raise exception.SwicthNotFoundForNetworkAdapter( + adapter=vlan_interface) + # Check whether bridge already exists and retrieve the the ref of the + # network whose name_label is "bridge" + network_ref = network_utils.get_network_with_the_name(session, bridge) + if network_ref is None: + # Create a port group on the vSwitch associated with the + # vlan_interface corresponding physical network adapter on the ESX + # host. + network_utils.create_port_group(session, bridge, + vswitch_associated, vlan_num) + else: + # Get the vlan id and vswitch corresponding to the port group + pg_vlanid, pg_vswitch = \ + network_utils.get_vlanid_and_vswitch_for_portgroup(session, + bridge) + + # Check if the vswitch associated is proper + if pg_vswitch != vswitch_associated: + raise exception.InvalidVLANPortGroup( + bridge=bridge, expected=vswitch_associated, + actual=pg_vswitch) + + # Check if the vlan id is proper for the port group + if pg_vlanid != vlan_num: + raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, + pgroup=pg_vlanid) + + def unplug(self, instance, network, mapping): + pass diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index a2fa7600c..55578dd3c 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -40,7 +40,7 @@ def split_datastore_path(datastore_path): def get_vm_create_spec(client_factory, instance, data_store_name,
network_name="vmnet0",
- os_type="otherGuest"):
+ os_type="otherGuest", network_ref=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = instance.name
@@ -61,8 +61,12 @@ def get_vm_create_spec(client_factory, instance, data_store_name, config_spec.numCPUs = int(instance.vcpus)
config_spec.memoryMB = int(instance.memory_mb)
+ mac_address = None
+ if instance['mac_addresses']:
+ mac_address = instance['mac_addresses'][0]['address']
+
nic_spec = create_network_spec(client_factory,
- network_name, instance.mac_address)
+ network_name, mac_address)
device_config_spec = [nic_spec]
@@ -89,7 +93,8 @@ def create_controller_spec(client_factory, key): return virtual_device_config
-def create_network_spec(client_factory, network_name, mac_address):
+def create_network_spec(client_factory, network_name, mac_address,
+ network_ref=None):
"""
Builds a config spec for the addition of a new network
adapter to the VM.
@@ -101,9 +106,24 @@ def create_network_spec(client_factory, network_name, mac_address): # Get the recommended card type for the VM based on the guest OS of the VM
net_device = client_factory.create('ns0:VirtualPCNet32')
- backing = \
- client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
- backing.deviceName = network_name
+ # NOTE(asomya): Only works on ESXi if the portgroup binding is set to
+ # ephemeral. Invalid configuration if set to static and the NIC does
+ # not come up on boot if set to dynamic.
+ backing = None
+ if (network_ref['type'] == "DistributedVirtualPortgroup"):
+ backing_name = \
+ 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
+ backing = \
+ client_factory.create(backing_name)
+ portgroup = \
+ client_factory.create('ns0:DistributedVirtualSwitchPortConnection')
+ portgroup.switchUuid = network_ref['dvsw']
+ portgroup.portgroupKey = network_ref['dvpg']
+ backing.port = portgroup
+ else:
+ backing = \
+ client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
+ backing.deviceName = network_name
connectable_spec = \
client_factory.create('ns0:VirtualDeviceConnectInfo')
@@ -274,9 +294,11 @@ def get_dummy_vm_create_spec(client_factory, name, data_store_name): return config_spec
-def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask, gateway):
+def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask,
+ gateway, broadcast, dns):
"""Builds the machine id change config spec."""
- machine_id_str = "%s;%s;%s;%s" % (mac, ip_addr, netmask, gateway)
+ machine_id_str = "%s;%s;%s;%s;%s;%s" % (mac, ip_addr, netmask,
+ gateway, broadcast, dns)
virtual_machine_config_spec = \
client_factory.create('ns0:VirtualMachineConfigSpec')
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 5f76b0df5..7e7d2dac3 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -31,6 +31,7 @@ from nova import db from nova import exception
from nova import flags
from nova import log as logging
+from nova import utils
from nova.compute import power_state
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -38,6 +39,10 @@ from nova.virt.vmwareapi import vmware_images from nova.virt.vmwareapi import network_utils
FLAGS = flags.FLAGS
+flags.DEFINE_string('vmware_vif_driver',
+ 'nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver',
+ 'The VMWare VIF driver to configure the VIFs.')
+
LOG = logging.getLogger("nova.virt.vmwareapi.vmops")
VMWARE_POWER_STATES = {
@@ -52,6 +57,7 @@ class VMWareVMOps(object): def __init__(self, session):
"""Initializer."""
self._session = session
+ self._vif_driver = utils.import_object(FLAGS.vmware_vif_driver)
def _wait_with_callback(self, instance_id, task, callback):
"""Waits for the task to finish and does a callback after."""
@@ -83,7 +89,7 @@ class VMWareVMOps(object): LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, instance):
+ def spawn(self, instance, network_info):
"""
Creates a VM instance.
@@ -116,8 +122,10 @@ class VMWareVMOps(object): net_name)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=net_name)
+ return network_ref
- _check_if_network_bridge_exists()
+ self.plug_vifs(instance, network_info)
+ network_obj = _check_if_network_bridge_exists()
def _get_datastore_ref():
"""Get the datastore list and choose the first local storage."""
@@ -175,8 +183,10 @@ class VMWareVMOps(object): vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
# Get the create vm config spec
- config_spec = vm_util.get_vm_create_spec(client_factory, instance,
- data_store_name, net_name, os_type)
+ config_spec = vm_util.get_vm_create_spec(
+ client_factory, instance,
+ data_store_name, net_name, os_type,
+ network_obj)
def _execute_create_vm():
"""Create VM on ESX host."""
@@ -472,11 +482,14 @@ class VMWareVMOps(object): _clean_temp_data()
- def reboot(self, instance):
+ def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
+
+ self.plug_vifs(instance, network_info)
+
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -514,7 +527,7 @@ class VMWareVMOps(object): self._session._wait_for_task(instance.id, reset_task)
LOG.debug(_("Did hard reboot of VM %s") % instance.name)
- def destroy(self, instance):
+ def destroy(self, instance, network_info):
"""
Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
@@ -560,6 +573,8 @@ class VMWareVMOps(object): LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
" while un-registering the VM: %s") % str(excep))
+ self._unplug_vifs(instance, network_info)
+
# Delete the folder holding the VM related content on
# the datastore.
try:
@@ -706,19 +721,29 @@ class VMWareVMOps(object): Set the machine id of the VM for guest tools to pick up and change
the IP.
"""
+ admin_context = context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
- mac_addr = instance.mac_address
+ mac_address = None
+ if instance['mac_addresses']:
+ mac_address = instance['mac_addresses'][0]['address']
+
net_mask = network["netmask"]
gateway = network["gateway"]
- ip_addr = db.instance_get_fixed_address(context.get_admin_context(),
- instance['id'])
+ broadcast = network["broadcast"]
+ dns = network["dns"]
+
+ addresses = db.instance_get_fixed_addresses(admin_context,
+ instance['id'])
+ ip_addr = addresses[0] if addresses else None
+
machine_id_chanfge_spec = \
- vm_util.get_machine_id_change_spec(client_factory, mac_addr,
- ip_addr, net_mask, gateway)
+ vm_util.get_machine_id_change_spec(client_factory, mac_address,
+ ip_addr, net_mask, gateway,
+ broadcast, dns)
LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
"with ip - %(ip_addr)s") %
({'name': instance.name,
@@ -778,3 +803,13 @@ class VMWareVMOps(object): if vm.propSet[0].val == vm_name:
return vm.obj
return None
+
+ def plug_vifs(self, instance, network_info):
+ """Plug VIFs into networks."""
+ for (network, mapping) in network_info:
+ self._vif_driver.plug(instance, network, mapping)
+
+ def _unplug_vifs(self, instance, network_info):
+ """Unplug VIFs from networks."""
+ for (network, mapping) in network_info:
+ self._vif_driver.unplug(instance, network, mapping)
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py index 3c6345ec8..ce57847b2 100644 --- a/nova/virt/vmwareapi_conn.py +++ b/nova/virt/vmwareapi_conn.py @@ -124,21 +124,21 @@ class VMWareESXConnection(driver.ComputeDriver): """List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance, network_info=None, block_device_mapping=None):
+ def spawn(self, instance, network_info, block_device_mapping=None):
"""Create VM instance."""
- self._vmops.spawn(instance)
+ self._vmops.spawn(instance, network_info)
def snapshot(self, instance, name):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(instance, name)
- def reboot(self, instance):
+ def reboot(self, instance, network_info):
"""Reboot VM instance."""
- self._vmops.reboot(instance)
+ self._vmops.reboot(instance, network_info)
- def destroy(self, instance):
+ def destroy(self, instance, network_info):
"""Destroy VM instance."""
- self._vmops.destroy(instance)
+ self._vmops.destroy(instance, network_info)
def pause(self, instance, callback):
"""Pause VM instance."""
@@ -190,6 +190,14 @@ class VMWareESXConnection(driver.ComputeDriver): """This method is supported only by libvirt."""
return
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
+
+ def plug_vifs(self, instance, network_info):
+ """Plugs in VIFs to networks."""
+ self._vmops.plug_vifs(instance, network_info)
+
class VMWareAPISession(object):
"""
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py new file mode 100644 index 000000000..527602243 --- /dev/null +++ b/nova/virt/xenapi/vif.py @@ -0,0 +1,140 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# Copyright (C) 2011 Nicira, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""VIF drivers for XenAPI.""" + +from nova import flags +from nova import log as logging +from nova.virt.vif import VIFDriver +from nova.virt.xenapi.network_utils import NetworkHelper + +FLAGS = flags.FLAGS +flags.DEFINE_string('xenapi_ovs_integration_bridge', 'xapi1', + 'Name of Integration Bridge used by Open vSwitch') + +LOG = logging.getLogger("nova.virt.xenapi.vif") + + +class XenAPIBridgeDriver(VIFDriver): + """VIF Driver for XenAPI that uses XenAPI to create Networks.""" + + def plug(self, xenapi_session, vm_ref, instance, device, network, + network_mapping): + if network_mapping.get('should_create_vlan'): + network_ref = self.ensure_vlan_bridge(xenapi_session, network) + else: + network_ref = NetworkHelper.find_network_with_bridge( + xenapi_session, network['bridge']) + rxtx_cap = network_mapping.pop('rxtx_cap') + vif_rec = {} + vif_rec['device'] = str(device) + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = network_mapping['mac'] + vif_rec['MTU'] = '1500' + vif_rec['other_config'] = {} + vif_rec['qos_algorithm_type'] = "ratelimit" if 
rxtx_cap else '' + vif_rec['qos_algorithm_params'] = \ + {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {} + return vif_rec + + def ensure_vlan_bridge(self, xenapi_session, network): + """Ensure that a VLAN bridge exists""" + + vlan_num = network['vlan'] + bridge = network['bridge'] + bridge_interface = network['bridge_interface'] + # Check whether bridge already exists + # Retrieve network whose name_label is "bridge" + network_ref = NetworkHelper.find_network_with_name_label( + xenapi_session, bridge) + if network_ref is None: + # If bridge does not exists + # 1 - create network + description = 'network for nova bridge %s' % bridge + network_rec = {'name_label': bridge, + 'name_description': description, + 'other_config': {}} + network_ref = xenapi_session.call_xenapi('network.create', + network_rec) + # 2 - find PIF for VLAN NOTE(salvatore-orlando): using double + # quotes inside single quotes as xapi filter only support + # tokens in double quotes + expr = 'field "device" = "%s" and \ + field "VLAN" = "-1"' % bridge_interface + pifs = xenapi_session.call_xenapi('PIF.get_all_records_where', + expr) + pif_ref = None + # Multiple PIF are ok: we are dealing with a pool + if len(pifs) == 0: + raise Exception(_('Found no PIF for device %s') % \ + bridge_interface) + for pif_ref in pifs.keys(): + xenapi_session.call_xenapi('VLAN.create', + pif_ref, + str(vlan_num), + network_ref) + else: + # Check VLAN tag is appropriate + network_rec = xenapi_session.call_xenapi('network.get_record', + network_ref) + # Retrieve PIFs from network + for pif_ref in network_rec['PIFs']: + # Retrieve VLAN from PIF + pif_rec = xenapi_session.call_xenapi('PIF.get_record', + pif_ref) + pif_vlan = int(pif_rec['VLAN']) + # Raise an exception if VLAN != vlan_num + if pif_vlan != vlan_num: + raise Exception(_( + "PIF %(pif_rec['uuid'])s for network " + "%(bridge)s has VLAN id %(pif_vlan)d. 
" + "Expected %(vlan_num)d") % locals()) + + return network_ref + + def unplug(self, instance, network, mapping): + pass + + +class XenAPIOpenVswitchDriver(VIFDriver): + """VIF driver for Open vSwitch with XenAPI.""" + + def plug(self, xenapi_session, vm_ref, instance, device, network, + network_mapping): + # with OVS model, always plug into an OVS integration bridge + # that is already created + network_ref = NetworkHelper.find_network_with_bridge(xenapi_session, + FLAGS.xenapi_ovs_integration_bridge) + vif_rec = {} + vif_rec['device'] = str(device) + vif_rec['network'] = network_ref + vif_rec['VM'] = vm_ref + vif_rec['MAC'] = network_mapping['mac'] + vif_rec['MTU'] = '1500' + vif_id = "nova-" + str(instance['id']) + "-" + str(network['id']) + vif_rec['qos_algorithm_type'] = "" + vif_rec['qos_algorithm_params'] = {} + # OVS on the hypervisor monitors this key and uses it to + # set the iface-id attribute + vif_rec['other_config'] = {"nicira-iface-id": vif_id} + return vif_rec + + def unplug(self, instance, network, mapping): + pass diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f91958c57..62863c6d8 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -23,6 +23,7 @@ import json import os import pickle import re +import sys import tempfile import time import urllib @@ -71,17 +72,51 @@ KERNEL_DIR = '/boot/guest' class ImageType: """ Enumeration class for distinguishing different image types - 0 - kernel/ramdisk image (goes on dom0's filesystem) - 1 - disk image (local SR, partitioned by objectstore plugin) - 2 - raw disk image (local SR, NOT partitioned by plugin) - 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for + 0 - kernel image (goes on dom0's filesystem) + 1 - ramdisk image (goes on dom0's filesystem) + 2 - disk image (local SR, partitioned by objectstore plugin) + 3 - raw disk image (local SR, NOT partitioned by plugin) + 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed 
for linux, HVM assumed for Windows) """ - KERNEL_RAMDISK = 0 - DISK = 1 - DISK_RAW = 2 - DISK_VHD = 3 + KERNEL = 0 + RAMDISK = 1 + DISK = 2 + DISK_RAW = 3 + DISK_VHD = 4 + + KERNEL_STR = "kernel" + RAMDISK_STR = "ramdisk" + DISK_STR = "os" + DISK_RAW_STR = "os_raw" + DISK_VHD_STR = "vhd" + + @classmethod + def to_string(cls, image_type): + if image_type == ImageType.KERNEL: + return ImageType.KERNEL_STR + elif image_type == ImageType.RAMDISK: + return ImageType.RAMDISK_STR + elif image_type == ImageType.DISK: + return ImageType.DISK_STR + elif image_type == ImageType.DISK_RAW: + return ImageType.DISK_RAW_STR + elif image_type == ImageType.DISK_VHD: + return ImageType.VHD_STR + + @classmethod + def from_string(cls, image_type_str): + if image_type_str == ImageType.KERNEL_STR: + return ImageType.KERNEL + elif image_type == ImageType.RAMDISK_STR: + return ImageType.RAMDISK + elif image_type == ImageType.DISK_STR: + return ImageType.DISK + elif image_type == ImageType.DISK_RAW_STR: + return ImageType.DISK_RAW + elif image_type == ImageType.DISK_VHD_STR: + return ImageType.VHD class VMHelper(HelperBase): @@ -145,7 +180,6 @@ class VMHelper(HelperBase): 'VCPUs_max': vcpus, 'VCPUs_params': {}, 'xenstore_data': {}} - # Complete VM configuration record according to the image type # non-raw/raw with PV kernel/raw in HVM mode if use_pv_kernel: @@ -240,26 +274,13 @@ class VMHelper(HelperBase): raise StorageError(_('Unable to destroy VBD %s') % vbd_ref) @classmethod - def create_vif(cls, session, vm_ref, network_ref, mac_address, - dev, rxtx_cap=0): - """Create a VIF record. 
Returns a Deferred that gives the new - VIF reference.""" - vif_rec = {} - vif_rec['device'] = str(dev) - vif_rec['network'] = network_ref - vif_rec['VM'] = vm_ref - vif_rec['MAC'] = mac_address - vif_rec['MTU'] = '1500' - vif_rec['other_config'] = {} - vif_rec['qos_algorithm_type'] = "ratelimit" if rxtx_cap else '' - vif_rec['qos_algorithm_params'] = \ - {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {} - LOG.debug(_('Creating VIF for VM %(vm_ref)s,' - ' network %(network_ref)s.') % locals()) - vif_ref = session.call_xenapi('VIF.create', vif_rec) - LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,' - ' network %(network_ref)s.') % locals()) - return vif_ref + def destroy_vdi(cls, session, vdi_ref): + try: + task = session.call_xenapi('Async.VDI.destroy', vdi_ref) + session.wait_for_task(task) + except cls.XenAPI.Failure, exc: + LOG.exception(exc) + raise StorageError(_('Unable to destroy VDI %s') % vdi_ref) @classmethod def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): @@ -394,12 +415,12 @@ class VMHelper(HelperBase): """ LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) - sr_ref = safe_find_sr(session) - # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not - # have the `uuid` module. To work around this, we generate the uuids - # here (under Python 2.6+) and pass them as arguments + # NOTE(sirp): The Glance plugin runs under Python 2.4 + # which does not have the `uuid` module. 
To work around this, + # we generate the uuids here (under Python 2.6+) and + # pass them as arguments uuid_stack = [str(uuid.uuid4()) for i in xrange(2)] glance_host, glance_port = \ @@ -449,18 +470,20 @@ class VMHelper(HelperBase): # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and # DISK restores + LOG.debug(_("Fetching image %(image)s") % locals()) + LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type)) sr_ref = safe_find_sr(session) glance_client, image_id = nova.image.get_glance_client(image) meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) vdi_size = virtual_size - LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) - + LOG.debug(_("Size for image %(image)s:" + + "%(virtual_size)d") % locals()) if image_type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES - elif image_type == ImageType.KERNEL_RAMDISK and \ + elif image_type in (ImageType.KERNEL, ImageType.RAMDISK) and \ vdi_size > FLAGS.max_kernel_ramdisk_size: max_size = FLAGS.max_kernel_ramdisk_size raise exception.Error( @@ -469,29 +492,45 @@ class VMHelper(HelperBase): name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) - - with_vdi_attached_here(session, vdi_ref, False, - lambda dev: - _stream_disk(dev, image_type, - virtual_size, image_file)) - if image_type == ImageType.KERNEL_RAMDISK: - #we need to invoke a plugin for copying VDI's - #content into proper path - LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref) - fn = "copy_kernel_vdi" - args = {} - args['vdi-ref'] = vdi_ref - #let the plugin copy the correct number of bytes - args['image-size'] = str(vdi_size) - task = session.async_call_plugin('glance', fn, args) - filename = session.wait_for_task(task, instance_id) - #remove the VDI as it is not needed anymore - session.get_xenapi().VDI.destroy(vdi_ref) - 
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) - return filename - else: + # From this point we have a VDI on Xen host; + # If anything goes wrong, we need to remember its uuid. + try: + filename = None vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) - return [dict(vdi_type='os', vdi_uuid=vdi_uuid)] + with_vdi_attached_here(session, vdi_ref, False, + lambda dev: + _stream_disk(dev, image_type, + virtual_size, image_file)) + if image_type in (ImageType.KERNEL, ImageType.RAMDISK): + # We need to invoke a plugin for copying the + # content of the VDI into the proper path. + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref) + fn = "copy_kernel_vdi" + args = {} + args['vdi-ref'] = vdi_ref + # Let the plugin copy the correct number of bytes. + args['image-size'] = str(vdi_size) + task = session.async_call_plugin('glance', fn, args) + filename = session.wait_for_task(task, instance_id) + # Remove the VDI as it is not needed anymore. + session.get_xenapi().VDI.destroy(vdi_ref) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) + return [dict(vdi_type=ImageType.to_string(image_type), + vdi_uuid=None, + file=filename)] + else: + return [dict(vdi_type=ImageType.to_string(image_type), + vdi_uuid=vdi_uuid, + file=None)] + except (cls.XenAPI.Failure, IOError, OSError) as e: + # We look for XenAPI and OS failures. + LOG.exception(_("instance %s: Failed to fetch glance image"), + instance_id, exc_info=sys.exc_info()) + e.args = e.args + ([dict(vdi_type=ImageType. + to_string(image_type), + vdi_uuid=vdi_uuid, + file=filename)],) + raise e @classmethod def determine_disk_image_type(cls, instance): @@ -506,7 +545,8 @@ class VMHelper(HelperBase): whether a kernel_id is specified. 
""" def log_disk_format(image_type): - pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK', + pretty_format = {ImageType.KERNEL: 'KERNEL', + ImageType.RAMDISK: 'RAMDISK', ImageType.DISK: 'DISK', ImageType.DISK_RAW: 'DISK_RAW', ImageType.DISK_VHD: 'DISK_VHD'} @@ -519,8 +559,8 @@ class VMHelper(HelperBase): def determine_from_glance(): glance_disk_format2nova_type = { 'ami': ImageType.DISK, - 'aki': ImageType.KERNEL_RAMDISK, - 'ari': ImageType.KERNEL_RAMDISK, + 'aki': ImageType.KERNEL, + 'ari': ImageType.RAMDISK, 'raw': ImageType.DISK_RAW, 'vhd': ImageType.DISK_VHD} image_ref = instance.image_ref @@ -553,7 +593,7 @@ class VMHelper(HelperBase): image_type): """Fetch image from glance based on image type. - Returns: A single filename if image_type is KERNEL_RAMDISK + Returns: A single filename if image_type is KERNEL or RAMDISK A list of dictionaries that describe VDIs, otherwise """ if image_type == ImageType.DISK_VHD: @@ -568,13 +608,13 @@ class VMHelper(HelperBase): secret, image_type): """Fetch an image from objectstore. 
- Returns: A single filename if image_type is KERNEL_RAMDISK + Returns: A single filename if image_type is KERNEL or RAMDISK A list of dictionaries that describe VDIs, otherwise """ url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, image) LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) - if image_type == ImageType.KERNEL_RAMDISK: + if image_type in (ImageType.KERNEL, ImageType.RAMDISK): fn = 'get_kernel' else: fn = 'get_vdi' @@ -584,15 +624,20 @@ class VMHelper(HelperBase): args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' - if image_type != ImageType.KERNEL_RAMDISK: + if not image_type in (ImageType.KERNEL, ImageType.RAMDISK): args['add_partition'] = 'true' if image_type == ImageType.DISK_RAW: args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) - uuid_or_fn = session.wait_for_task(task, instance_id) - if image_type != ImageType.KERNEL_RAMDISK: - return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)] - return uuid_or_fn + vdi_uuid = None + filename = None + if image_type in (ImageType.KERNEL, ImageType.RAMDISK): + filename = session.wait_for_task(task, instance_id) + else: + vdi_uuid = session.wait_for_task(task, instance_id) + return [dict(vdi_type=ImageType.to_string(image_type), + vdi_uuid=vdi_uuid, + file=filename)] @classmethod def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 2f4286184..0473abb97 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,7 +24,10 @@ import json import M2Crypto import os import pickle +import random import subprocess +import sys +import time import uuid from nova import context @@ -44,7 +47,14 @@ from nova.virt.xenapi.vm_utils import ImageType XenAPI = None LOG = logging.getLogger("nova.virt.xenapi.vmops") + FLAGS = flags.FLAGS +flags.DEFINE_integer('windows_version_timeout', 300, + 'number of seconds to wait for windows 
agent to be ' + 'fully operational') +flags.DEFINE_string('xenapi_vif_driver', + 'nova.virt.xenapi.vif.XenAPIBridgeDriver', + 'The XenAPI VIF driver using XenServer Network APIs.') def cmp_version(a, b): @@ -71,6 +81,7 @@ class VMOps(object): self._session = session self.poll_rescue_last_ran = None VMHelper.XenAPI = self.XenAPI + self.vif_driver = utils.import_object(FLAGS.xenapi_vif_driver) def list_instances(self): """List VM instances.""" @@ -103,11 +114,12 @@ class VMOps(object): vm_ref = VMHelper.lookup(self._session, instance.name) self._start(instance, vm_ref) - def finish_resize(self, instance, disk_info): + def finish_resize(self, instance, disk_info, network_info): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) vm_ref = self._create_vm(instance, - [dict(vdi_type='os', vdi_uuid=vdi_uuid)]) + [dict(vdi_type='os', vdi_uuid=vdi_uuid)], + network_info) self.resize_instance(instance, vdi_uuid) self._spawn(instance, vm_ref) @@ -130,16 +142,25 @@ class VMOps(object): disk_image_type) return vdis - def spawn(self, instance, network_info=None): - vdis = self._create_disks(instance) - vm_ref = self._create_vm(instance, vdis, network_info) - self._spawn(instance, vm_ref) + def spawn(self, instance, network_info): + vdis = None + try: + vdis = self._create_disks(instance) + vm_ref = self._create_vm(instance, vdis, network_info) + self._spawn(instance, vm_ref) + except (self.XenAPI.Failure, OSError, IOError) as spawn_error: + LOG.exception(_("instance %s: Failed to spawn"), + instance.id, exc_info=sys.exc_info()) + LOG.debug(_('Instance %s failed to spawn - performing clean-up'), + instance.id) + self._handle_spawn_error(vdis, spawn_error) + raise spawn_error def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdis, network_info=None): + def _create_vm(self, instance, vdis, network_info): """Create VM instance.""" instance_name = instance.name vm_ref = 
VMHelper.lookup(self._session, instance_name) @@ -159,42 +180,64 @@ class VMOps(object): project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - kernel = None - if instance.kernel_id: - kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, - ImageType.KERNEL_RAMDISK) - ramdisk = None - if instance.ramdisk_id: - ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, - ImageType.KERNEL_RAMDISK) - - # Create the VM ref and attach the first disk - first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', - vdis[0]['vdi_uuid']) - - vm_mode = instance.vm_mode and instance.vm_mode.lower() - if vm_mode == 'pv': - use_pv_kernel = True - elif vm_mode in ('hv', 'hvm'): - use_pv_kernel = False - vm_mode = 'hvm' # Normalize - else: - use_pv_kernel = VMHelper.determine_is_pv(self._session, - instance.id, first_vdi_ref, disk_image_type, - instance.os_type) - vm_mode = use_pv_kernel and 'pv' or 'hvm' - - if instance.vm_mode != vm_mode: - # Update database with normalized (or determined) value - db.instance_update(context.get_admin_context(), - instance['id'], {'vm_mode': vm_mode}) - - vm_ref = VMHelper.create_vm(self._session, instance, - kernel, ramdisk, use_pv_kernel) + try: + if instance.kernel_id: + kernel = VMHelper.fetch_image(self._session, instance.id, + instance.kernel_id, user, project, + ImageType.KERNEL)[0] + if instance.ramdisk_id: + ramdisk = VMHelper.fetch_image(self._session, instance.id, + instance.ramdisk_id, user, project, + ImageType.RAMDISK)[0] + # Create the VM ref and attach the first disk + first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdis[0]['vdi_uuid']) + + vm_mode = instance.vm_mode and instance.vm_mode.lower() + if vm_mode == 'pv': + use_pv_kernel = True + elif vm_mode in ('hv', 'hvm'): + use_pv_kernel = False + vm_mode = 'hvm' # Normalize + else: + use_pv_kernel = 
VMHelper.determine_is_pv(self._session, + instance.id, first_vdi_ref, disk_image_type, + instance.os_type) + vm_mode = use_pv_kernel and 'pv' or 'hvm' + + if instance.vm_mode != vm_mode: + # Update database with normalized (or determined) value + db.instance_update(context.get_admin_context(), + instance['id'], {'vm_mode': vm_mode}) + vm_ref = VMHelper.create_vm(self._session, instance, + kernel and kernel.get('file', None) or None, + ramdisk and ramdisk.get('file', None) or None, + use_pv_kernel) + except (self.XenAPI.Failure, OSError, IOError) as vm_create_error: + # Collect VDI/file resources to clean up; + # These resources will be removed by _handle_spawn_error. + LOG.exception(_("instance %s: Failed to spawn - " + + "Unable to create VM"), + instance.id, exc_info=sys.exc_info()) + last_arg = None + resources = [] + + if vm_create_error.args: + last_arg = vm_create_error.args[-1] + if isinstance(last_arg, list): + resources = last_arg + else: + vm_create_error.args = vm_create_error.args + (resources,) + + if kernel: + resources.append(kernel) + if ramdisk: + resources.append(ramdisk) + + raise vm_create_error + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=first_vdi_ref, userdevice=0, bootable=True) @@ -211,17 +254,12 @@ class VMOps(object): bootable=False) userdevice += 1 - # TODO(tr3buchet) - check to make sure we have network info, otherwise - # create it now. This goes away once nova-multi-nic hits. - if network_info is None: - network_info = self._get_network_info(instance) - # Alter the image before VM start for, e.g. 
network injection if FLAGS.xenapi_inject_image: VMHelper.preconfigure_instance(self._session, instance, first_vdi_ref, network_info) - self.create_vifs(vm_ref, network_info) + self.create_vifs(vm_ref, instance, network_info) self.inject_network_info(instance, network_info, vm_ref) return vm_ref @@ -247,7 +285,15 @@ class VMOps(object): 'architecture': instance.architecture}) def _check_agent_version(): - version = self.get_agent_version(instance) + if instance.os_type == 'windows': + # Windows will generally perform a setup process on first boot + # that can take a couple of minutes and then reboot. So we + # need to be more patient than normal as well as watch for + # domid changes + version = self.get_agent_version(instance, + timeout=FLAGS.windows_version_timeout) + else: + version = self.get_agent_version(instance) if not version: LOG.info(_('No agent version returned by instance')) return @@ -298,6 +344,7 @@ class VMOps(object): _check_agent_version() _inject_files() _set_admin_password() + self.reset_network(instance, vm_ref) return True except Exception, exc: LOG.warn(exc) @@ -307,11 +354,49 @@ class VMOps(object): timer.f = _wait_for_boot - # call to reset network to configure network from xenstore - self.reset_network(instance, vm_ref) - return timer.start(interval=0.5, now=True) + def _handle_spawn_error(self, vdis, spawn_error): + # Extract resource list from spawn_error. 
+ resources = [] + if spawn_error.args: + last_arg = spawn_error.args[-1] + resources = last_arg + if vdis: + for vdi in vdis: + resources.append(dict(vdi_type=vdi['vdi_type'], + vdi_uuid=vdi['vdi_uuid'], + file=None)) + + LOG.debug(_("Resources to remove:%s"), resources) + kernel_file = None + ramdisk_file = None + + for item in resources: + vdi_type = item['vdi_type'] + vdi_to_remove = item['vdi_uuid'] + if vdi_to_remove: + try: + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdi_to_remove) + LOG.debug(_('Removing VDI %(vdi_ref)s' + + '(uuid:%(vdi_to_remove)s)'), locals()) + VMHelper.destroy_vdi(self._session, vdi_ref) + except self.XenAPI.Failure: + # Vdi has already been deleted + LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove) + if item['file']: + # There is also a file to remove. + if vdi_type == ImageType.KERNEL_STR: + kernel_file = item['file'] + elif vdi_type == ImageType.RAMDISK_STR: + ramdisk_file = item['file'] + + if kernel_file or ramdisk_file: + LOG.debug(_("Removing kernel/ramdisk files from dom0")) + self._destroy_kernel_ramdisk_plugin_call(kernel_file, + ramdisk_file) + def _get_vm_opaque_ref(self, instance_or_vm): """ Refactored out the common code of many methods that receive either @@ -386,7 +471,7 @@ class VMOps(object): self._session, instance, template_vdi_uuids, image_id) finally: if template_vm_ref: - self._destroy(instance, template_vm_ref, + self._destroy(instance, template_vm_ref, None, shutdown=False, destroy_kernel_ramdisk=False) logging.debug(_("Finished snapshot and upload for VM %s"), instance) @@ -502,18 +587,43 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref) self._session.wait_for_task(task, instance.id) - def get_agent_version(self, instance): + def get_agent_version(self, instance, timeout=None): """Get the version of the agent running on the VM instance.""" - # Send the encrypted password - transaction_id = str(uuid.uuid4()) - args = {'id': transaction_id} - resp 
= self._make_agent_call('version', instance, '', args) - if resp is None: - # No response from the agent - return - resp_dict = json.loads(resp) - return resp_dict['message'] + def _call(): + # Send the encrypted password + transaction_id = str(uuid.uuid4()) + args = {'id': transaction_id} + resp = self._make_agent_call('version', instance, '', args) + if resp is None: + # No response from the agent + return + resp_dict = json.loads(resp) + # Some old versions of the Windows agent have a trailing \\r\\n + # (ie CRLF escaped) for some reason. Strip that off. + return resp_dict['message'].replace('\\r\\n', '') + + if timeout: + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + + domid = vm_rec['domid'] + + expiration = time.time() + timeout + while time.time() < expiration: + ret = _call() + if ret: + return ret + + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + if vm_rec['domid'] != domid: + LOG.info(_('domid changed from %(olddomid)s to ' + '%(newdomid)s') % { + 'olddomid': domid, + 'newdomid': vm_rec['domid']}) + domid = vm_rec['domid'] + else: + return _call() def agent_update(self, instance, url, md5sum): """Update agent on the VM instance.""" @@ -556,9 +666,13 @@ class VMOps(object): # There was some sort of error; the message will contain # a description of the error. raise RuntimeError(resp_dict['message']) - agent_pub = int(resp_dict['message']) + # Some old versions of the Windows agent have a trailing \\r\\n + # (ie CRLF escaped) for some reason. Strip that off. + agent_pub = int(resp_dict['message'].replace('\\r\\n', '')) dh.compute_shared(agent_pub) - enc_pass = dh.encrypt(new_pass) + # Some old versions of Linux and Windows agent expect trailing \n + # on password to work correctly. 
+ enc_pass = dh.encrypt(new_pass + '\n') # Send the encrypted password password_transaction_id = str(uuid.uuid4()) password_args = {'id': password_transaction_id, 'enc_pass': enc_pass} @@ -666,6 +780,16 @@ class VMOps(object): VMHelper.unplug_vbd(self._session, vbd_ref) VMHelper.destroy_vbd(self._session, vbd_ref) + def _destroy_kernel_ramdisk_plugin_call(self, kernel, ramdisk): + args = {} + if kernel: + args['kernel-file'] = kernel + if ramdisk: + args['ramdisk-file'] = ramdisk + task = self._session.async_call_plugin( + 'glance', 'remove_kernel_ramdisk', args) + self._session.wait_for_task(task) + def _destroy_kernel_ramdisk(self, instance, vm_ref): """Three situations can occur: @@ -695,13 +819,7 @@ class VMOps(object): (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session, vm_ref) - LOG.debug(_("Removing kernel/ramdisk files")) - - args = {'kernel-file': kernel, 'ramdisk-file': ramdisk} - task = self._session.async_call_plugin( - 'glance', 'remove_kernel_ramdisk', args) - self._session.wait_for_task(task, instance.id) - + self._destroy_kernel_ramdisk_plugin_call(kernel, ramdisk) LOG.debug(_("kernel/ramdisk files removed")) def _destroy_vm(self, instance, vm_ref): @@ -723,7 +841,7 @@ class VMOps(object): self._session.call_xenapi("Async.VM.destroy", rescue_vm_ref) - def destroy(self, instance): + def destroy(self, instance, network_info): """Destroy VM instance. This is the method exposed by xenapi_conn.destroy(). 
The rest of the @@ -733,9 +851,9 @@ class VMOps(object): instance_id = instance.id LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals()) vm_ref = VMHelper.lookup(self._session, instance.name) - return self._destroy(instance, vm_ref, shutdown=True) + return self._destroy(instance, vm_ref, network_info, shutdown=True) - def _destroy(self, instance, vm_ref, shutdown=True, + def _destroy(self, instance, vm_ref, network_info, shutdown=True, destroy_kernel_ramdisk=True): """Destroys VM instance by performing: @@ -757,6 +875,10 @@ class VMOps(object): self._destroy_kernel_ramdisk(instance, vm_ref) self._destroy_vm(instance, vm_ref) + if network_info: + for (network, mapping) in network_info: + self.vif_driver.unplug(instance, network, mapping) + def _wait_with_callback(self, instance_id, task, callback): ret = None try: @@ -900,76 +1022,44 @@ class VMOps(object): # TODO: implement this! return 'http://fakeajaxconsole/fake_url' - # TODO(tr3buchet) - remove this function after nova multi-nic - def _get_network_info(self, instance): - """Creates network info list for instance.""" - admin_context = context.get_admin_context() - ips = db.fixed_ip_get_all_by_instance(admin_context, - instance['id']) - networks = db.network_get_all_by_instance(admin_context, - instance['id']) - - inst_type = db.instance_type_get_by_id(admin_context, - instance['instance_type_id']) - - network_info = [] - for network in networks: - network_ips = [ip for ip in ips if ip.network_id == network.id] - - def ip_dict(ip): - return { - "ip": ip.address, - "netmask": network["netmask"], - "enabled": "1"} - - def ip6_dict(): - return { - "ip": ipv6.to_global(network['cidr_v6'], - instance['mac_address'], - instance['project_id']), - "netmask": network['netmask_v6'], - "enabled": "1"} - - info = { - 'label': network['label'], - 'gateway': network['gateway'], - 'broadcast': network['broadcast'], - 'mac': instance.mac_address, - 'rxtx_cap': inst_type['rxtx_cap'], - 'dns': [network['dns']], - 
'ips': [ip_dict(ip) for ip in network_ips]} - if network['cidr_v6']: - info['ip6s'] = [ip6_dict()] - if network['gateway_v6']: - info['gateway6'] = network['gateway_v6'] - network_info.append((network, info)) - return network_info - - #TODO{tr3buchet) remove this shim with nova-multi-nic - def inject_network_info(self, instance, network_info=None, vm_ref=None): - """ - shim in place which makes inject_network_info work without being - passed network_info. - shim goes away after nova-multi-nic + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + args = {"enabled": json.dumps(enabled)} + json_resp = self._call_xenhost("set_host_enabled", args) + resp = json.loads(json_resp) + return resp["status"] + + def _call_xenhost(self, method, arg_dict): + """There will be several methods that will need this general + handling for interacting with the xenhost plugin, so this abstracts + out that behavior. """ - if not network_info: - network_info = self._get_network_info(instance) - self._inject_network_info(instance, network_info, vm_ref) + # Create a task ID as something that won't match any instance ID + task_id = random.randint(-80000, -70000) + try: + task = self._session.async_call_plugin("xenhost", method, + args=arg_dict) + #args={"params": arg_dict}) + ret = self._session.wait_for_task(task, task_id) + except self.XenAPI.Failure as e: + ret = None + LOG.error(_("The call to %(method)s returned an error: %(e)s.") + % locals()) + return ret - def _inject_network_info(self, instance, network_info, vm_ref=None): + def inject_network_info(self, instance, network_info, vm_ref=None): """ Generate the network info and make calls to place it into the xenstore and the xenstore param list. 
vm_ref can be passed in because it will sometimes be different than what VMHelper.lookup(session, instance.name) will find (ex: rescue) """ - logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref) - if vm_ref: # this function raises if vm_ref is not a vm_opaque_ref self._session.get_xenapi().VM.get_record(vm_ref) else: vm_ref = VMHelper.lookup(self._session, instance.name) + logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref) for (network, info) in network_info: location = 'vm-data/networking/%s' % info['mac'].replace(':', '') @@ -984,22 +1074,28 @@ class VMOps(object): # catch KeyError for domid if instance isn't running pass - def create_vifs(self, vm_ref, network_info): + def create_vifs(self, vm_ref, instance, network_info): """Creates vifs for an instance.""" + logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref) # this function raises if vm_ref is not a vm_opaque_ref self._session.get_xenapi().VM.get_record(vm_ref) for device, (network, info) in enumerate(network_info): - mac_address = info['mac'] - bridge = network['bridge'] - rxtx_cap = info.pop('rxtx_cap') - network_ref = \ - NetworkHelper.find_network_with_bridge(self._session, - bridge) - VMHelper.create_vif(self._session, vm_ref, network_ref, - mac_address, device, rxtx_cap) + vif_rec = self.vif_driver.plug(self._session, + vm_ref, instance, device, network, info) + network_ref = vif_rec['network'] + LOG.debug(_('Creating VIF for VM %(vm_ref)s,' \ + ' network %(network_ref)s.') % locals()) + vif_ref = self._session.call_xenapi('VIF.create', vif_rec) + LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,' + ' network %(network_ref)s.') % locals()) + + def plug_vifs(instance, network_info): + """Set up VIF networking on the host.""" + for (network, mapping) in network_info: + self.vif_driver.plug(self._session, instance, network, mapping) def reset_network(self, instance, vm_ref=None): """Creates uuid arg to pass to make_agent_call and calls it.""" diff --git 
a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 5fcec1715..7c355a55b 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -194,23 +194,23 @@ class XenAPIConnection(driver.ComputeDriver): def list_instances_detail(self): return self._vmops.list_instances_detail() - def spawn(self, instance, network_info=None, block_device_mapping=None): + def spawn(self, instance, network_info, block_device_mapping=None): """Create VM instance""" - self._vmops.spawn(instance) + self._vmops.spawn(instance, network_info) def revert_resize(self, instance): """Reverts a resize, powering back on the instance""" self._vmops.revert_resize(instance) - def finish_resize(self, instance, disk_info): + def finish_resize(self, instance, disk_info, network_info): """Completes a resize, turning on the migrated instance""" - self._vmops.finish_resize(instance, disk_info) + self._vmops.finish_resize(instance, disk_info, network_info) def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ self._vmops.snapshot(instance, image_id) - def reboot(self, instance): + def reboot(self, instance, network_info): """Reboot VM instance""" self._vmops.reboot(instance) @@ -224,9 +224,9 @@ class XenAPIConnection(driver.ComputeDriver): """ self._vmops.inject_file(instance, b64_path, b64_contents) - def destroy(self, instance): + def destroy(self, instance, network_info): """Destroy VM instance""" - self._vmops.destroy(instance) + self._vmops.destroy(instance, network_info) def pause(self, instance, callback): """Pause VM instance""" @@ -249,11 +249,11 @@ class XenAPIConnection(driver.ComputeDriver): """resume the specified instance""" self._vmops.resume(instance, callback) - def rescue(self, instance, callback): + def rescue(self, instance, callback, network_info): """Rescue the specified instance""" self._vmops.rescue(instance, callback) - def unrescue(self, instance, callback): + def unrescue(self, instance, callback, network_info): """Unrescue the 
specified instance""" self._vmops.unrescue(instance, callback) @@ -265,9 +265,12 @@ class XenAPIConnection(driver.ComputeDriver): """reset networking for specified instance""" self._vmops.reset_network(instance) - def inject_network_info(self, instance): + def inject_network_info(self, instance, network_info): """inject network info for specified instance""" - self._vmops.inject_network_info(instance) + self._vmops.inject_network_info(instance, network_info) + + def plug_vifs(self, instance_ref, network_info): + self._vmops.plug_vifs(instance_ref, network_info) def get_info(self, instance_id): """Return data about VM instance""" @@ -322,7 +325,7 @@ class XenAPIConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" return - def unfilter_instance(self, instance_ref): + def unfilter_instance(self, instance_ref, network_info): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') @@ -336,6 +339,10 @@ class XenAPIConnection(driver.ComputeDriver): True, run the update first.""" return self.HostState.get_host_stats(refresh=refresh) + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + return self._vmops.set_host_enabled(host, enabled) + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" diff --git a/nova/volume/api.py b/nova/volume/api.py index 7d27abff9..52b3a9fed 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -52,7 +52,7 @@ class API(base.Base): if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id - LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" + LOG.warn(_("Quota exceeded for %(pid)s, tried to create" " %(size)sG volume") % locals()) raise quota.QuotaError(_("Volume quota exceeded. 
You cannot " "create a volume of size %sG") % size) @@ -140,9 +140,10 @@ class API(base.Base): {"method": "remove_volume", "args": {'volume_id': volume_id}}) - def create_snapshot(self, context, volume_id, name, description): + def _create_snapshot(self, context, volume_id, name, description, + force=False): volume = self.get(context, volume_id) - if volume['status'] != "available": + if ((not force) and (volume['status'] != "available")): raise exception.ApiError(_("Volume status must be available")) options = { @@ -164,6 +165,14 @@ class API(base.Base): "snapshot_id": snapshot['id']}}) return snapshot + def create_snapshot(self, context, volume_id, name, description): + return self._create_snapshot(context, volume_id, name, description, + False) + + def create_snapshot_force(self, context, volume_id, name, description): + return self._create_snapshot(context, volume_id, name, description, + True) + def delete_snapshot(self, context, snapshot_id): snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": diff --git a/nova/wsgi.py b/nova/wsgi.py index 23d29079f..eae3afcb4 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -67,6 +67,7 @@ class Server(object): self.host = host or "0.0.0.0" self.port = port or 0 self._server = None + self._tcp_server = None self._socket = None self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) self._logger = logging.getLogger("eventlet.wsgi.server") @@ -106,6 +107,17 @@ class Server(object): """ LOG.info(_("Stopping WSGI server.")) self._server.kill() + if self._tcp_server is not None: + LOG.info(_("Stopping raw TCP server.")) + self._tcp_server.kill() + + def start_tcp(self, listener, port, host='0.0.0.0', key=None, backlog=128): + """Run a raw TCP server with the given application.""" + arg0 = sys.argv[0] + LOG.info(_('Starting TCP server %(arg0)s on %(host)s:%(port)s') + % locals()) + socket = eventlet.listen((host, port), backlog=backlog) + self._tcp_server = 
self._pool.spawn_n(self._run_tcp, listener, socket) def wait(self): """Block, until the server has stopped. @@ -120,6 +132,15 @@ class Server(object): except greenlet.GreenletExit: LOG.info(_("WSGI server has stopped.")) + def _run_tcp(self, listener, socket): + """Start a raw TCP server in a new green thread.""" + while True: + try: + new_sock, address = socket.accept() + self._pool.spawn_n(listener, new_sock) + except (SystemExit, KeyboardInterrupt): + pass + class Request(webob.Request): pass diff --git a/plugins/xenserver/xenapi/contrib/build-rpm.sh b/plugins/xenserver/xenapi/contrib/build-rpm.sh new file mode 100755 index 000000000..f7bed4d84 --- /dev/null +++ b/plugins/xenserver/xenapi/contrib/build-rpm.sh @@ -0,0 +1,20 @@ +#!/bin/bash +PACKAGE=openstack-xen-plugins +RPMBUILD_DIR=$PWD/rpmbuild +if [ ! -d $RPMBUILD_DIR ]; then + echo $RPMBUILD_DIR is missing + exit 1 +fi + +for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do + rm -rf $RPMBUILD_DIR/$dir + mkdir -p $RPMBUILD_DIR/$dir +done + +rm -rf /tmp/$PACKAGE +mkdir /tmp/$PACKAGE +cp -r ../etc/xapi.d /tmp/$PACKAGE +tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE + +rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \ + $RPMBUILD_DIR/SPECS/$PACKAGE.spec diff --git a/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec new file mode 100644 index 000000000..cb2af2109 --- /dev/null +++ b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec @@ -0,0 +1,37 @@ +Name: openstack-xen-plugins +Version: 2011.3 +Release: 1 +Summary: Files for XenAPI support. +License: ASL 2.0 +Group: Applications/Utilities +Source0: openstack-xen-plugins.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +Requires: parted + +%define debug_package %{nil} + +%description +This package contains files that are required for XenAPI support for OpenStack. 
+ +%prep +%setup -q -n openstack-xen-plugins + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/etc +cp -r xapi.d $RPM_BUILD_ROOT/etc +chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/* + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +/etc/xapi.d/plugins/agent +/etc/xapi.d/plugins/glance +/etc/xapi.d/plugins/migration +/etc/xapi.d/plugins/objectstore +/etc/xapi.d/plugins/pluginlib_nova.py +/etc/xapi.d/plugins/xenhost +/etc/xapi.d/plugins/xenstore.py diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent index b8a1b936a..d609a88ab 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -37,7 +37,7 @@ import time import XenAPIPlugin from pluginlib_nova import * -configure_logging("xenstore") +configure_logging("agent") import xenstore AGENT_TIMEOUT = 30 @@ -72,7 +72,9 @@ def key_init(self, arg_dict): info to be passed, such as passwords. Returns the shared secret key value. """ - pub = int(arg_dict["pub"]) + # WARNING: Some older Windows agents will crash if the public key isn't + # a string + pub = arg_dict["pub"] arg_dict["value"] = json.dumps({"name": "keyinit", "value": pub}) request_id = arg_dict["id"] arg_dict["path"] = "data/host/%s" % request_id @@ -114,7 +116,6 @@ def resetnetwork(self, arg_dict): xenstore.write_record(self, arg_dict) -@jsonify def inject_file(self, arg_dict): """Expects a file path and the contents of the file to be written. Both should be base64-encoded in order to eliminate errors as they are passed @@ -127,20 +128,21 @@ def inject_file(self, arg_dict): been disabled, and raise a NotImplemented error if that is the case. 
""" b64_path = arg_dict["b64_path"] - b64_file = arg_dict["b64_file"] + b64_file = arg_dict["b64_contents"] request_id = arg_dict["id"] - if self._agent_has_method("file_inject"): + agent_features = _get_agent_features(self, arg_dict) + if "file_inject" in agent_features: # New version of the agent. Agent should receive a 'value' # key whose value is a dictionary containing 'b64_path' and # 'b64_file'. See old version below. arg_dict["value"] = json.dumps({"name": "file_inject", "value": {"b64_path": b64_path, "b64_file": b64_file}}) - elif self._agent_has_method("injectfile"): + elif "injectfile" in agent_features: # Old agent requires file path and file contents to be # combined into one base64 value. raw_path = base64.b64decode(b64_path) raw_file = base64.b64decode(b64_file) - new_b64 = base64.b64encode("%s,%s") % (raw_path, raw_file) + new_b64 = base64.b64encode("%s,%s" % (raw_path, raw_file)) arg_dict["value"] = json.dumps({"name": "injectfile", "value": new_b64}) else: @@ -174,30 +176,23 @@ def agent_update(self, arg_dict): return resp -def _agent_has_method(self, method): - """Check that the agent has a particular method by checking its - features. Cache the features so we don't have to query the agent - every time we need to check. 
- """ +def _get_agent_features(self, arg_dict): + """Return an array of features that an agent supports.""" + tmp_id = commands.getoutput("uuidgen") + dct = {} + dct.update(arg_dict) + dct["value"] = json.dumps({"name": "features", "value": ""}) + dct["path"] = "data/host/%s" % tmp_id + xenstore.write_record(self, dct) try: - self._agent_methods - except AttributeError: - self._agent_methods = [] - if not self._agent_methods: - # Haven't been defined - tmp_id = commands.getoutput("uuidgen") - dct = {} - dct["value"] = json.dumps({"name": "features", "value": ""}) - dct["path"] = "data/host/%s" % tmp_id - xenstore.write_record(self, dct) - try: - resp = _wait_for_agent(self, tmp_id, dct) - except TimeoutError, e: - raise PluginError(e) - response = json.loads(resp) - # The agent returns a comma-separated list of methods. - self._agent_methods = response.split(",") - return method in self._agent_methods + resp = _wait_for_agent(self, tmp_id, dct) + except TimeoutError, e: + raise PluginError(e) + response = json.loads(resp) + if response['returncode'] != 0: + return response["message"].split(",") + else: + return {} def _wait_for_agent(self, request_id, arg_dict): diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 46031ebe8..fbe080b22 100644..100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -412,8 +412,8 @@ def copy_kernel_vdi(session, args): def remove_kernel_ramdisk(session, args): """Removes kernel and/or ramdisk from dom0's file system""" - kernel_file = exists(args, 'kernel-file') - ramdisk_file = exists(args, 'ramdisk-file') + kernel_file = optional(args, 'kernel-file') + ramdisk_file = optional(args, 'ramdisk-file') if kernel_file: os.remove(kernel_file) if ramdisk_file: diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 
ac1c50ad9..ac1c50ad9 100644..100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore index d0313b4ed..d0313b4ed 100644..100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py index f51f5fce4..f51f5fce4 100755..100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost index a8428e841..292bbce12 100644..100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost @@ -33,9 +33,10 @@ import tempfile import time import XenAPIPlugin +import pluginlib_nova as pluginlib -from pluginlib_nova import * -configure_logging("xenhost") + +pluginlib.configure_logging("xenhost") host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)") @@ -65,14 +66,49 @@ def _run_command(cmd): return proc.stdout.read() +def _get_host_uuid(): + cmd = "xe host-list | grep uuid" + resp = _run_command(cmd) + return resp.split(":")[-1].strip() + + +@jsonify +def set_host_enabled(self, arg_dict): + """Sets this host's ability to accept new instances. + It will otherwise continue to operate normally. 
+ """ + enabled = arg_dict.get("enabled") + if enabled is None: + raise pluginlib.PluginError( + _("Missing 'enabled' argument to set_host_enabled")) + if enabled == "true": + result = _run_command("xe host-enable") + elif enabled == "false": + result = _run_command("xe host-disable") + else: + raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled) + # Should be empty string + if result: + raise pluginlib.PluginError(result) + # Return the current enabled status + host_uuid = _get_host_uuid() + cmd = "xe host-param-list uuid=%s | grep enabled" % host_uuid + resp = _run_command(cmd) + # Response should be in the format: "enabled ( RO): true" + host_enabled = resp.strip().split()[-1] + if host_enabled == "true": + status = "enabled" + else: + status = "disabled" + return {"status": status} + + @jsonify def host_data(self, arg_dict): """Runs the commands on the xenstore host to return the current status information. """ - cmd = "xe host-list | grep uuid" - resp = _run_command(cmd) - host_uuid = resp.split(":")[-1].strip() + host_uuid = _get_host_uuid() cmd = "xe host-param-list uuid=%s" % host_uuid resp = _run_command(cmd) parsed_data = parse_response(resp) @@ -180,4 +216,5 @@ def cleanup(dct): if __name__ == "__main__": XenAPIPlugin.dispatch( - {"host_data": host_data}) + {"host_data": host_data, + "set_host_enabled": set_host_enabled}) @@ -14,8 +14,8 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -14,8 +14,8 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-19 
06:18+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:11+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -14,8 +14,8 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-04-03 19:42+0000\n" -"Last-Translator: Matthias Loidolt <kedapperdrake@googlemail.com>\n" +"PO-Revision-Date: 2011-06-06 07:58+0000\n" +"Last-Translator: Christian Berendt <Unknown>\n" "Language-Team: German <de@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-04-04 05:19+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -85,6 +85,7 @@ msgstr "" #, python-format msgid "%(param)s property not found for image %(_image_id)s" msgstr "" +"Die Property %(param)s konnte im Image %(_image_id)s nicht gefunden werden" #: ../nova/api/openstack/servers.py:168 msgid "No keypairs defined" @@ -141,12 +142,12 @@ msgstr "PID-Datei %s existiert nicht. 
Läuft der Daemon nicht?\n" #: ../nova/twistd.py:221 msgid "No such process" -msgstr "" +msgstr "Kein passender Prozess gefunden" #: ../nova/twistd.py:230 ../nova/service.py:224 #, python-format msgid "Serving %s" -msgstr "" +msgstr "Bedient %s" #: ../nova/twistd.py:262 ../nova/service.py:225 msgid "Full set of FLAGS:" @@ -183,12 +184,13 @@ msgstr "" #: ../nova/virt/xenapi/volumeops.py:91 #, python-format msgid "Unable to attach volume to instance %s" -msgstr "" +msgstr "Nicht möglich Volumen zur Instanze %s hinzuzufügen" #: ../nova/virt/xenapi/volumeops.py:93 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" msgstr "" +"Einhängepunkt%(mountpoint)s zur Instanze %(instance_name)s hinzugefügt" #. Detach VBD from VM #: ../nova/virt/xenapi/volumeops.py:104 @@ -199,7 +201,7 @@ msgstr "" #: ../nova/virt/xenapi/volumeops.py:112 #, python-format msgid "Unable to locate volume %s" -msgstr "" +msgstr "Nicht möglich volume %s zufinden" #: ../nova/virt/xenapi/volumeops.py:120 #, python-format @@ -214,7 +216,7 @@ msgstr "" #: ../nova/compute/instance_types.py:41 #, python-format msgid "Unknown instance type: %s" -msgstr "" +msgstr "Unbekannter Instanztyp: %s" #: ../nova/crypto.py:46 msgid "Filename of root CA" @@ -230,7 +232,7 @@ msgstr "Dateiname der Certificate Revocation List" #: ../nova/crypto.py:53 msgid "Where we keep our keys" -msgstr "" +msgstr "Wo wir unsere Schlüssel aufbewahren" #: ../nova/crypto.py:55 msgid "Where we keep our root CA" @@ -298,12 +300,12 @@ msgstr "" #: ../nova/compute/manager.py:179 msgid "Instance has already been created" -msgstr "" +msgstr "Instanz wurde bereits erstellt" #: ../nova/compute/manager.py:180 #, python-format msgid "instance %s: starting..." -msgstr "" +msgstr "Instanz %s startet..." #. 
pylint: disable=W0702 #: ../nova/compute/manager.py:219 @@ -314,7 +316,7 @@ msgstr "" #: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format msgid "Terminating instance %s" -msgstr "" +msgstr "Beende Instanz %s" #: ../nova/compute/manager.py:255 #, python-format @@ -377,7 +379,7 @@ msgstr "" #: ../nova/compute/manager.py:372 #, python-format msgid "instance %s: rescuing" -msgstr "" +msgstr "Instanz %s: Rettung" #: ../nova/compute/manager.py:387 #, python-format @@ -387,12 +389,12 @@ msgstr "" #: ../nova/compute/manager.py:406 #, python-format msgid "instance %s: pausing" -msgstr "" +msgstr "Instanz %s pausiert" #: ../nova/compute/manager.py:423 #, python-format msgid "instance %s: unpausing" -msgstr "" +msgstr "Instanz %s wird fortgesetzt" #: ../nova/compute/manager.py:440 #, python-format @@ -584,7 +586,7 @@ msgstr "" #: ../nova/virt/connection.py:73 msgid "Failed to open connection to the hypervisor" -msgstr "" +msgstr "Konnte Verbindung zum Hypervisor nicht öffnen" #: ../nova/network/linux_net.py:187 #, python-format @@ -637,7 +639,7 @@ msgstr "Klasse %s konnte nicht gefunden werden" #: ../nova/utils.py:118 #, python-format msgid "Fetching %s" -msgstr "" +msgstr "Hole %s" #: ../nova/utils.py:130 #, python-format @@ -2562,7 +2564,7 @@ msgstr "" #: ../nova/auth/manager.py:270 #, python-format msgid "Using project name = user name (%s)" -msgstr "" +msgstr "Verwende Project-Name = User-Name (%s)" #: ../nova/auth/manager.py:277 #, python-format @@ -2572,7 +2574,7 @@ msgstr "" #: ../nova/auth/manager.py:279 #, python-format msgid "No project called %s could be found" -msgstr "" +msgstr "Es konnte kein Projekt mit dem Namen %s gefunden werden" #: ../nova/auth/manager.py:287 #, python-format @@ -2696,6 +2698,7 @@ msgstr "" #: ../nova/service.py:195 msgid "The service database object disappeared, Recreating it." msgstr "" +"Das Service-Datenbank-Objekt ist verschwunden, es wird erneut erzeugt." 
#: ../nova/service.py:207 msgid "Recovered model server connection!" @@ -2723,7 +2726,7 @@ msgstr "" #: ../nova/auth/ldapdriver.py:472 #, python-format msgid "Group can't be created because group %s already exists" -msgstr "" +msgstr "Die Gruppe %s kann nicht angelegt werde, da sie bereits existiert" #: ../nova/auth/ldapdriver.py:478 #, python-format @@ -2739,6 +2742,7 @@ msgstr "" #, python-format msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" +"Der User %s kann nicht zur Gruppe hinzugefügt werde, da er nicht existiert" #: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format @@ -2755,6 +2759,7 @@ msgstr "" msgid "" "User %s can't be removed from the group because the user doesn't exist" msgstr "" +"Der User %s kann nicht aus der Gruppe entfernt werden, da er nicht existiert" #: ../nova/auth/ldapdriver.py:528 #, python-format @@ -2840,7 +2845,7 @@ msgstr "" #: ../nova/api/ec2/admin.py:200 #, python-format msgid "Delete project: %s" -msgstr "" +msgstr "Lösche Projekt %s" #: ../nova/api/ec2/admin.py:214 #, python-format diff --git a/po/en_AU.po b/po/en_AU.po new file mode 100644 index 000000000..e53f9fc07 --- /dev/null +++ b/po/en_AU.po @@ -0,0 +1,2848 @@ +# English (Australia) translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-06-30 10:30+0000\n" +"Last-Translator: Benny <Benjamin.Donald.Wilson.K@gmail.com>\n" +"Language-Team: English (Australia) <en_AU@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" + +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "No hosts found" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" 
+msgstr "" + +#: ../nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: ../nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" + +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:91 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:95 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "" + +#: ../nova/compute/manager.py:180 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#. 
pylint: disable=W0702 +#: ../nova/compute/manager.py:219 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:311 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: ../nova/compute/manager.py:316 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:332 +#, python-format +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:335 +#, python-format +msgid "instance %s: setting admin password" +msgstr "" + +#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" + +#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: ../nova/compute/manager.py:387 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: ../nova/compute/manager.py:406 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: 
../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "" + +#: ../nova/compute/manager.py:553 +#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: ../nova/compute/manager.py:585 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" + +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:406 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: ../nova/utils.py:58 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: ../nova/utils.py:222 +#, python-format +msgid "Running %s" +msgstr "" + +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: ../nova/utils.py:374 +#, python-format +msgid "backend %s" +msgstr "" + +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:246 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" +msgstr "" + +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:397 +#, python-format +msgid "PV Kernel in VDI:%s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:442 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:463 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:465 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:542 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:567 +#, python-format +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:574 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr 
"" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in 
pool %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: 
../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" + +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: ../nova/api/ec2/cloud.py:450 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:629 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 +#, python-format +msgid "Release address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:771 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:780 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "" + +#: ../nova/api/ec2/cloud.py:815 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: ../nova/api/ec2/cloud.py:867 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid 
"only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "" + +#: ../nova/api/ec2/cloud.py:908 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" +msgstr "" + +#: ../bin/nova-api.py:57 +#, python-format +msgid "No paste configuration for app: %s" +msgstr "" + +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" + +#: ../bin/nova-api.py:64 +#, python-format +msgid "Running %s API" +msgstr "" + +#: ../bin/nova-api.py:69 +#, python-format +msgid "No known API applications configured in %s." +msgstr "" + +#: ../bin/nova-api.py:83 +#, python-format +msgid "Starting nova-api node (version %s)" +msgstr "" + +#: ../bin/nova-api.py:89 +#, python-format +msgid "No paste configuration found for: %s" +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 +#, python-format +msgid "Argument %(key)s value %(value)s is too short." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 +#, python-format +msgid "Argument %(key)s value %(value)s contains invalid characters." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 +#, python-format +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 +#, python-format +msgid "Argument %s is required." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." 
+msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:67 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:73 +#, python-format +msgid "instance %(name)s: not enough free memory" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:148 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:151 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:162 +#, python-format +msgid "Invalid value for onset_files: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:167 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:180 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:232 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:269 +#, python-format +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:280 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 +#, python-format +msgid "" +"TIMEOUT: The call to %(method)s timed out. 
VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:569 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: ../nova/image/s3.py:99 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:131 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: ../nova/api/ec2/__init__.py:207 +#, python-format +msgid "action: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:281 +#, python-format +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:314 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:317 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: ../nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:55 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: ../nova/virt/disk.py:91 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: ../nova/virt/disk.py:124 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" + +#: ../doc/ext/nova_todo.py:46 +#, python-format +msgid "%(filename)s, line %(line_info)d" +msgstr "" + +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: ../nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" + +#: ../nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: ../nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: ../nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:276 +#, python-format +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "" + +#: ../nova/virt/hyperv.py:286 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:288 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: 
../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "" + +#: ../nova/virt/hyperv.py:361 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:386 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:393 +#, python-format +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" + +#: ../nova/virt/hyperv.py:415 +#, python-format +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" + +#: ../nova/virt/hyperv.py:451 +#, python-format +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/compute/api.py:71 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: ../nova/compute/api.py:77 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: ../nova/compute/api.py:97 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" + +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 +#, python-format +msgid "Going to run %s instances..." 
+msgstr "" + +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" + +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" +msgstr "" + +#: ../nova/compute/api.py:296 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: ../nova/compute/api.py:301 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: ../nova/compute/api.py:481 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 +#, python-format +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" + +#: ../nova/rpc.py:103 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." 
+msgstr "" + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:364 +#, python-format +msgid "response %s" +msgstr "" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/virt/fake.py:239 +#, 
python-format +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/network/manager.py:153 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... 
" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: ../nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to 
access bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:273 +#, python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:276 +#, python-format +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:296 +#, python-format +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:299 +#, python-format +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:318 +#, python-format +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:396 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: ../nova/objectstore/handler.py:404 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:409 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: ../nova/objectstore/handler.py:423 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:431 +#, python-format +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" + +#. 
other attributes imply update +#: ../nova/objectstore/handler.py:436 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:450 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" + +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:423 +#, python-format +msgid 
"Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:448 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:515 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:592 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: ../nova/auth/manager.py:659 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:671 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" 
+msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:472 +#, python-format +msgid "Group can't be created because group %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:478 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:495 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" +msgstr "" + +#: ../nova/auth/ldapdriver.py:542 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: ../nova/auth/ldapdriver.py:549 +#, python-format +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:564 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project 
%(project)s" +msgstr "" diff --git a/po/en_GB.po b/po/en_GB.po new file mode 100644 index 000000000..601f6170b --- /dev/null +++ b/po/en_GB.po @@ -0,0 +1,2873 @@ +# English (United Kingdom) translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-06-19 18:14+0000\n" +"Last-Translator: Dave Walker <davewalker@ubuntu.com>\n" +"Language-Team: English (United Kingdom) <en_GB@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" + +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "No hosts found" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "DB exception wrapped" + +#. 
exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Uncaught exception" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %sG" +msgstr "Volume quota exceeded. You cannot create a volume of size %sG" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "Volume status must be available" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "Volume is already attached" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "Volume is already detached" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "Failed to read private ip" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "Failed to read public ip(s)" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "%(param)s property not found for image %(_image_id)s" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "No keypairs defined" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" 
+msgstr "Compute.api::pause %s" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "Wrong number of arguments." + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "pidfile %s does not exist. Daemon not running?\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "No such process" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "Serving %s" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "Full set of FLAGS:" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "Starting %s" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "Instance %s not found" + +#. 
NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Unable to use SR %(sr_ref)s for instance %(instance_name)s" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Unable to attach volume to instance %s" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Unable to locate volume %s" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Unable to detach volume %s" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "Unknown instance type: %s" + +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Filename of root CA" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Filename of private key" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Filename of root Certificate Revocation List" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Where we keep our keys" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Where we keep our root CA" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Should we use a CA for each project?" 
+ +#: ../nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "Subject for certificate for users, %s for project, user, timestamp" + +#: ../nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Subject for certificate for projects, %s for project, timestamp" + +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "Subject for certificate for vpns, %s for project, timestamp" + +#: ../nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Flags path: %s" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "Casting to %(topic)s %(host)s for %(method)s" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" + +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: ../nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: ../nova/compute/manager.py:91 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executing: |%s|" + +#: ../nova/compute/manager.py:95 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executing |%s|" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "Instance has already been created" + +#: ../nova/compute/manager.py:180 +#, python-format +msgid "instance %s: 
starting..." +msgstr "instance %s: starting..." + +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "instance %s: Failed to spawn" + +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "Terminating instance %s" + +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "Deallocating address %s" + +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "trying to destroy already destroyed instance: %s" + +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "Rebooting instance %s" + +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" + +#: ../nova/compute/manager.py:311 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instance %s: snapshotting" + +#: ../nova/compute/manager.py:316 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" + +#: ../nova/compute/manager.py:332 +#, python-format +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" + +#: ../nova/compute/manager.py:335 +#, python-format +msgid "instance %s: setting admin password" +msgstr "instance %s: setting admin password" + +#: 
../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" + +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "instance %(nm)s: injecting file to %(plain_path)s" + +#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" +msgstr "instance %s: rescuing" + +#: ../nova/compute/manager.py:387 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: ../nova/compute/manager.py:406 +#, python-format +msgid "instance %s: pausing" +msgstr "instance %s: pausing" + +#: ../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instance %s: retrieving diagnostics" + +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "instance %s: suspending" + +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "instance %s: resuming" + +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "instance %s: locking" + +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "instance %s: unlocking" + +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instance %s: getting locked state" + +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "instance %s: reset network" + +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "Get console output for 
instance %s" + +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "instance %s: getting ajax console" + +#: ../nova/compute/manager.py:553 +#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" + +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. +#: ../nova/compute/manager.py:569 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "instance %(instance_id)s: attach failed %(mountpoint)s, removing" + +#: ../nova/compute/manager.py:585 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" + +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Detaching volume from unknown instance %s" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "Host %s is not alive" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "All hosts have too many cores" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "Host %s not available" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "All hosts have too many gigabytes" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "All hosts have too many networks" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping 
export" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "Volume is still attached" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "Volume is not local to this node" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "Raising NotImplemented" + +#: ../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake does not have an implementation for %s" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Calling %(localname)s %(impl)s" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "Calling getter %s" + +#: ../nova/virt/xenapi/fake.py:406 +#, 
python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "Can't test instances without a real virtual env." + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "Need to watch instance %s until it's running..." + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "Failed to open connection to the hypervisor" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Starting VLAN inteface %s" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Starting Bridge interface for %s" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Hupping dnsmasq threw %s" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d is stale, relaunching dnsmasq" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "killing radvd threw %s" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d is stale, relaunching radvd" + +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "Killing dnsmasq threw %s" + +#: ../nova/utils.py:58 +#, python-format +msgid "Inner Exception: %s" +msgstr "Inner Exception: %s" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "Class %s cannot be found" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: ../nova/utils.py:222 +#, python-format +msgid "Running %s" +msgstr "Running %s" + +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: ../nova/utils.py:374 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "(%(nm)s) publish (key: %(routing_key)s) %(message)s" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "Publishing to route %s" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "Declaring queue %s" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "Declaring exchange %s" + +#: 
../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "Binding %(queue)s to %(exchange)s with key %(routing_key)s" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "Getting from %(queue)s: %(message)s" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." +msgstr "Created VM %s..." + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "Created VM %(instance_name)s as %(vm_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD not found in instance %s" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Unable to unplug VBD %s" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Unable to destroy VBD %s" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +msgstr "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:246 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." 
+msgstr "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "Snapshotting VM %(vm_ref)s with label '%(label)s'..." + +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" + +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "Size for image %(image)s:%(virtual_size)d" + +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" +msgstr "Glance image %s" + +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copying VDI %s to /boot/guest on dom0" + +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Kernel/Ramdisk VDI %s destroyed" + +#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "Asking xapi to fetch %(url)s as %(access)s" + +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Looking up vdi %s for PV kernel" + +#: ../nova/virt/xenapi/vm_utils.py:397 +#, python-format +msgid "PV Kernel in VDI:%s" +msgstr "PV Kernel in VDI:%s" + +#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" +msgstr "Running pygrub against %s" + +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Found Xen kernel %s" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "No Xen kernel found. Booting HVM." 
+ +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" +msgstr "duplicate name found: %s" + +#: ../nova/virt/xenapi/vm_utils.py:442 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s is still available" + +#: ../nova/virt/xenapi/vm_utils.py:463 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" + +#: ../nova/virt/xenapi/vm_utils.py:465 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s has parent %(parent_ref)s" + +#: ../nova/virt/xenapi/vm_utils.py:542 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-scanning SR %s" + +#: ../nova/virt/xenapi/vm_utils.py:567 +#, python-format +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." + +#: ../nova/virt/xenapi/vm_utils.py:574 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "No VDIs found for VM %s" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "Creating VBD for VDI %s ... 
" + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "Creating VBD for VDI %s done." + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "Plugging VBD %s ... " + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "Plugging VBD %s done." + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "VBD %(vbd)s plugged as %(orig_dev)s" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "Destroying VBD for VDI %s ... " + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "Destroying VBD for VDI %s done." + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "VBD.unplug successful first time." + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "VBD.unplug rejected: retrying..." + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "VBD.unplug successful eventually." 
+ +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "Ignoring XenAPI.Failure in VBD.unplug: %s" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "Ignoring XenAPI.Failure %s" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." +msgstr "Writing partition table %s done." + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "Nested received %(queue)s, %(value)s" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "Nested return %s" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "Received %s" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "No service for id %s" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "" + +#: 
../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" 
+msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: 
../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" + +#. TODO(termie): cache? 
+#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. 
TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." +msgstr "" + +#: ../nova/api/ec2/cloud.py:450 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:629 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 +#, python-format +msgid "Release address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:771 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:780 
+#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "" + +#: ../nova/api/ec2/cloud.py:815 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: ../nova/api/ec2/cloud.py:867 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "" + +#: ../nova/api/ec2/cloud.py:908 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" +msgstr "" + +#: ../bin/nova-api.py:57 +#, python-format +msgid "No paste configuration for app: %s" +msgstr "" + +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" + +#: ../bin/nova-api.py:64 +#, python-format +msgid "Running %s API" +msgstr "" + +#: ../bin/nova-api.py:69 +#, python-format +msgid "No known API applications configured in %s." +msgstr "" + +#: ../bin/nova-api.py:83 +#, python-format +msgid "Starting nova-api node (version %s)" +msgstr "" + +#: ../bin/nova-api.py:89 +#, python-format +msgid "No paste configuration found for: %s" +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 +#, python-format +msgid "Argument %(key)s value %(value)s is too short." 
+msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 +#, python-format +msgid "Argument %(key)s value %(value)s contains invalid characters." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 +#, python-format +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 +#, python-format +msgid "Argument %s is required." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:67 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:73 +#, python-format +msgid "instance %(name)s: not enough free memory" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:148 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:151 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:162 +#, python-format +msgid "Invalid value for onset_files: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:167 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:180 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:232 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vmops.py:261 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:269 +#, python-format +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:280 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 +#, python-format +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:569 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." 
+msgstr "" + +#: ../nova/image/s3.py:99 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:131 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." +msgstr "" + +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: ../nova/api/ec2/__init__.py:207 +#, python-format +msgid "action: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:281 +#, python-format +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:314 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:317 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: ../nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:55 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: ../nova/virt/disk.py:91 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: ../nova/virt/disk.py:124 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" + +#: ../doc/ext/nova_todo.py:46 +#, python-format +msgid "%(filename)s, line %(line_info)d" +msgstr "" + +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: ../nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" + +#: ../nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: ../nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: ../nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:276 +#, python-format +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "" + +#: ../nova/virt/hyperv.py:286 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:288 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: 
../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "" + +#: ../nova/virt/hyperv.py:361 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:386 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:393 +#, python-format +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" + +#: ../nova/virt/hyperv.py:415 +#, python-format +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" + +#: ../nova/virt/hyperv.py:451 +#, python-format +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/compute/api.py:71 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: ../nova/compute/api.py:77 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: ../nova/compute/api.py:97 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" + +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 +#, python-format +msgid "Going to run %s instances..." 
+msgstr "" + +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" + +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" +msgstr "" + +#: ../nova/compute/api.py:296 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: ../nova/compute/api.py:301 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: ../nova/compute/api.py:481 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 +#, python-format +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" + +#: ../nova/rpc.py:103 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." 
+msgstr "" + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:364 +#, python-format +msgid "response %s" +msgstr "" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/virt/fake.py:239 +#, 
python-format +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/network/manager.py:153 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... 
" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: ../nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to 
access bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:273 +#, python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:276 +#, python-format +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:296 +#, python-format +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:299 +#, python-format +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:318 +#, python-format +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:396 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: ../nova/objectstore/handler.py:404 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:409 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: ../nova/objectstore/handler.py:423 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:431 +#, python-format +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" + +#. 
other attributes imply update +#: ../nova/objectstore/handler.py:436 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:450 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" + +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:423 +#, python-format +msgid 
"Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:448 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:515 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:592 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: ../nova/auth/manager.py:659 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:671 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" 
+msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:472 +#, python-format +msgid "Group can't be created because group %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:478 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:495 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" +msgstr "" + +#: ../nova/auth/ldapdriver.py:542 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: ../nova/auth/ldapdriver.py:549 +#, python-format +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:564 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project 
%(project)s" +msgstr "" @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-03-17 15:54+0000\n" -"Last-Translator: Erick Huezo <erickhuezo@gmail.com>\n" +"PO-Revision-Date: 2011-06-30 16:42+0000\n" +"Last-Translator: David Caro <Unknown>\n" "Language-Team: Spanish <es@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -36,10 +36,15 @@ msgid "" "Stdout: %(stdout)r\n" "Stderr: %(stderr)r" msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de salida: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" #: ../nova/exception.py:107 msgid "DB exception wrapped" -msgstr "" +msgstr "Excepción DB encapsulada" #. exc_type, exc_value, exc_traceback = sys.exc_info() #: ../nova/exception.py:120 @@ -49,12 +54,12 @@ msgstr "Excepción no controlada" #: ../nova/volume/api.py:45 #, python-format msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" -msgstr "" +msgstr "Cuota excedida por %(pid)s, se intentó crear el volumen %(size)sG" #: ../nova/volume/api.py:47 #, python-format msgid "Volume quota exceeded. You cannot create a volume of size %sG" -msgstr "Cuota excedida. No puedes crear un volumen con tamaño %sG" +msgstr "Cuota excedida. 
No puede crear un volumen con tamaño %sG" #: ../nova/volume/api.py:71 ../nova/volume/api.py:96 msgid "Volume status must be available" @@ -83,7 +88,7 @@ msgstr "%(param)s propiedad no encontrada para la imagen %(_image_id)s" #: ../nova/api/openstack/servers.py:168 msgid "No keypairs defined" -msgstr "No se definio una Keypairs" +msgstr "No se definio un par de llaves (Keypair)" #: ../nova/api/openstack/servers.py:238 #, python-format @@ -103,7 +108,7 @@ msgstr "Compute.api::get_lock %s" #: ../nova/api/openstack/servers.py:281 #, python-format msgid "Compute.api::reset_network %s" -msgstr "" +msgstr "Compute.api::reset_network %s" #: ../nova/api/openstack/servers.py:292 #, python-format @@ -127,16 +132,16 @@ msgstr "compute.api::resume %s" #: ../nova/twistd.py:157 msgid "Wrong number of arguments." -msgstr "Numero de argumentos incorrectos" +msgstr "Cantidad de argumentos incorrecta" #: ../nova/twistd.py:209 #, python-format msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "el pidfile %s no existe. ¿No estará el demonio parado?\n" +msgstr "El \"pidfile\" %s no existe. 
Quizás el servicio no este corriendo.\n" #: ../nova/twistd.py:221 msgid "No such process" -msgstr "No se encontró proceso" +msgstr "No existe el proceso" #: ../nova/twistd.py:230 ../nova/service.py:224 #, python-format @@ -145,12 +150,12 @@ msgstr "Sirviendo %s" #: ../nova/twistd.py:262 ../nova/service.py:225 msgid "Full set of FLAGS:" -msgstr "Conjunto completo de opciones:" +msgstr "Conjunto completo de opciones (FLAGS):" #: ../nova/twistd.py:266 #, python-format msgid "Starting %s" -msgstr "Comenzando %s" +msgstr "Iniciando %s" #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 @@ -163,17 +168,19 @@ msgstr "La instancia %s no se ha encontrado" #: ../nova/virt/xenapi/volumeops.py:51 #, python-format msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" -msgstr "" +msgstr "Volumen_unido: %(instance_name)s, %(device_path)s, %(mountpoint)s" #: ../nova/virt/xenapi/volumeops.py:69 #, python-format msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" msgstr "" +"No es posible crear el VDI en SR %(sr_ref)s para la instancia " +"%(instance_name)s" #: ../nova/virt/xenapi/volumeops.py:80 #, python-format msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "" +msgstr "No es posible usar SR %(sr_ref)s para la instancia %(instance_name)s" #: ../nova/virt/xenapi/volumeops.py:91 #, python-format @@ -184,12 +191,14 @@ msgstr "Imposible adjuntar volumen a la instancia %s" #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" msgstr "" +"El punto de montaje %(mountpoint)s esta unido a la instancia " +"%(instance_name)s" #. 
Detach VBD from VM #: ../nova/virt/xenapi/volumeops.py:104 #, python-format msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "" +msgstr "Volume_separado: %(instance_name)s, %(mountpoint)s" #: ../nova/virt/xenapi/volumeops.py:112 #, python-format @@ -205,6 +214,8 @@ msgstr "Imposible desasociar volumen %s" #, python-format msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" msgstr "" +"El punto de montaje %(mountpoint)s se desligó de la instancia " +"%(instance_name)s" #: ../nova/compute/instance_types.py:41 #, python-format @@ -259,7 +270,7 @@ msgstr "" #: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" -msgstr "" +msgstr "Ruta a las opciones: %s" #: ../nova/scheduler/manager.py:69 #, python-format @@ -276,6 +287,7 @@ msgstr "check_instance_lock: decorating: |%s|" msgid "" "check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" msgstr "" +"check_instance_lock: argumentos: |%(self)s| |%(context)s| |%(instance_id)s|" #: ../nova/compute/manager.py:84 #, python-format @@ -338,6 +350,8 @@ msgid "" "trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " "expected: %(running)s)" msgstr "" +"intentando reiniciar una instancia no ejecutada: %(instance_id)s (state: " +"%(state)s expected: %(running)s)" #: ../nova/compute/manager.py:311 #, python-format @@ -350,6 +364,8 @@ msgid "" "trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " "expected: %(running)s)" msgstr "" +"intentando crear una imagen instantanea(snapshot) de una maquina no " +"ejecutada: %(instance_id)s (state: %(state)s expected: %(running)s)" #: ../nova/compute/manager.py:332 #, python-format @@ -357,11 +373,13 @@ msgid "" "trying to reset the password on a non-running instance: %(instance_id)s " "(state: %(instance_state)s expected: %(expected_state)s)" msgstr "" +"intentando restablecer el password en una instancia: %(instance_id)s " +"(estado: %(instance_state)s esperado: 
%(expected_state)s)" #: ../nova/compute/manager.py:335 #, python-format msgid "instance %s: setting admin password" -msgstr "" +msgstr "instancia %s: estableciendo password de administrador" #: ../nova/compute/manager.py:353 #, python-format @@ -369,11 +387,13 @@ msgid "" "trying to inject a file into a non-running instance: %(instance_id)s (state: " "%(instance_state)s expected: %(expected_state)s)" msgstr "" +"intentando inyectar un archivo dentro de una instancia parada: " +"%(instance_id)s (estado: %(instance_state)s esperado: %(expected_state)s)" #: ../nova/compute/manager.py:362 #, python-format msgid "instance %(nm)s: injecting file to %(plain_path)s" -msgstr "" +msgstr "instancia %(nm)s: inyectando archivo en %(plain_path)s" #: ../nova/compute/manager.py:372 #, python-format @@ -393,7 +413,7 @@ msgstr "instancia %s: pausando" #: ../nova/compute/manager.py:423 #, python-format msgid "instance %s: unpausing" -msgstr "instnacia %s: continuando tras pausa" +msgstr "instancia %s: continuando tras pausa" #: ../nova/compute/manager.py:440 #, python-format @@ -403,7 +423,7 @@ msgstr "instancia %s: obteniendo los diagnosticos" #: ../nova/compute/manager.py:453 #, python-format msgid "instance %s: suspending" -msgstr "" +msgstr "instancia %s: suspendiendo" #: ../nova/compute/manager.py:472 #, python-format @@ -501,7 +521,7 @@ msgstr "Exportando de nuevo los volumenes %s" #: ../nova/volume/manager.py:90 #, python-format msgid "volume %s: skipping export" -msgstr "" +msgstr "volume %s: saltando exportación" #: ../nova/volume/manager.py:96 #, python-format @@ -511,7 +531,7 @@ msgstr "volumen %s: creando" #: ../nova/volume/manager.py:108 #, python-format msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" -msgstr "" +msgstr "volume %(vol_name)s: creando lv del tamaño %(vol_size)sG" #: ../nova/volume/manager.py:112 #, python-format @@ -549,7 +569,7 @@ msgstr "volumen %s: eliminado satisfactoriamente" #: ../nova/virt/xenapi/fake.py:74 #, python-format msgid 
"%(text)s: _db_content => %(content)s" -msgstr "" +msgstr "%(text)s: _db_content => %(content)s" #: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 #: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 @@ -564,7 +584,7 @@ msgstr "xenapi.fake no tiene una implementación para %s" #: ../nova/virt/xenapi/fake.py:341 #, python-format msgid "Calling %(localname)s %(impl)s" -msgstr "" +msgstr "Llamando %(localname)s %(impl)s" #: ../nova/virt/xenapi/fake.py:346 #, python-format @@ -618,12 +638,12 @@ msgstr "El pid %d está pasado, relanzando dnsmasq" #: ../nova/network/linux_net.py:358 #, python-format msgid "killing radvd threw %s" -msgstr "" +msgstr "Matando radvd lanzado %s" #: ../nova/network/linux_net.py:360 #, python-format msgid "Pid %d is stale, relaunching radvd" -msgstr "" +msgstr "Pid %d corrupto, relanzando radvd" #. pylint: disable=W0703 #: ../nova/network/linux_net.py:449 @@ -659,7 +679,7 @@ msgstr "El resultado fue %s" #: ../nova/utils.py:159 #, python-format msgid "Running cmd (SSH): %s" -msgstr "" +msgstr "corriendo cmd (SSH): %s" #: ../nova/utils.py:217 #, python-format @@ -674,12 +694,12 @@ msgstr "Ejecutando %s" #: ../nova/utils.py:262 #, python-format msgid "Link Local address is not found.:%s" -msgstr "" +msgstr "No se encuentra la dirección del enlace local.:%s" #: ../nova/utils.py:265 #, python-format msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "" +msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s" #: ../nova/utils.py:363 #, python-format @@ -694,7 +714,7 @@ msgstr "backend %s" #: ../nova/fakerabbit.py:49 #, python-format msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -msgstr "" +msgstr "(%(nm)s) publica (key: %(routing_key)s) %(message)s" #: ../nova/fakerabbit.py:54 #, python-format @@ -714,12 +734,12 @@ msgstr "Declarando intercambio %s" #: ../nova/fakerabbit.py:96 #, python-format msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -msgstr "" 
+msgstr "Enlazando %(queue)s a %(exchange)s con la llave %(routing_key)s" #: ../nova/fakerabbit.py:121 #, python-format msgid "Getting from %(queue)s: %(message)s" -msgstr "" +msgstr "Obtendiendo desde %(queue)s: %(message)s" #: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format @@ -729,17 +749,17 @@ msgstr "Creada VM %s..." #: ../nova/virt/xenapi/vm_utils.py:138 #, python-format msgid "Created VM %(instance_name)s as %(vm_ref)s." -msgstr "" +msgstr "VM creada %(instance_name)s como %(vm_ref)s." #: ../nova/virt/xenapi/vm_utils.py:168 #, python-format msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "" +msgstr "Creando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... " #: ../nova/virt/xenapi/vm_utils.py:171 #, python-format msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "" +msgstr "Creado el VBD %(vbd_ref)s para VM %(vm_ref)s, VDI %(vdi_ref)s" #: ../nova/virt/xenapi/vm_utils.py:187 #, python-format @@ -759,12 +779,12 @@ msgstr "Imposible destruir VBD %s" #: ../nova/virt/xenapi/vm_utils.py:224 #, python-format msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." -msgstr "" +msgstr "Creando VIF para VM %(vm_ref)s, red %(network_ref)s." #: ../nova/virt/xenapi/vm_utils.py:227 #, python-format msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -msgstr "" +msgstr "Creado el VIF %(vif_ref)s para VM %(vm_ref)s, red %(network_ref)s." #: ../nova/virt/xenapi/vm_utils.py:246 #, python-format @@ -772,50 +792,52 @@ msgid "" "Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " "%(sr_ref)s." msgstr "" +"VDI creado %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) " +"sobre %(sr_ref)s." #. TODO(sirp): Add quiesce and VSS locking support when Windows support #. is added #: ../nova/virt/xenapi/vm_utils.py:258 #, python-format msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." 
-msgstr "" +msgstr "Creando snapshot de la VM %(vm_ref)s con etiqueta '%(label)s'..." #: ../nova/virt/xenapi/vm_utils.py:272 #, python-format msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." -msgstr "" +msgstr "Instantánea creada %(template_vm_ref)s de la VM %(vm_ref)s." #: ../nova/virt/xenapi/vm_utils.py:286 #, python-format msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "" +msgstr "Pidiendo xapi a subir %(vdi_uuids)s como ID %(image_id)s" #: ../nova/virt/xenapi/vm_utils.py:327 #, python-format msgid "Size for image %(image)s:%(virtual_size)d" -msgstr "" +msgstr "Tamaño para imagen %(image)s:%(virtual_size)d" #: ../nova/virt/xenapi/vm_utils.py:332 #, python-format msgid "Glance image %s" -msgstr "" +msgstr "Imagen Glance %s" #. we need to invoke a plugin for copying VDI's #. content into proper path #: ../nova/virt/xenapi/vm_utils.py:342 #, python-format msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "" +msgstr "Copiando VDI %s a /boot/guest on dom0" #: ../nova/virt/xenapi/vm_utils.py:352 #, python-format msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "" +msgstr "Kernel/Ramdisk VDI %s destruÃdo" #: ../nova/virt/xenapi/vm_utils.py:361 #, python-format msgid "Asking xapi to fetch %(url)s as %(access)s" -msgstr "" +msgstr "Pidiendo a xapi que descargue %(url)s como %(access)s" #: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format @@ -825,21 +847,21 @@ msgstr "Buscando vid %s para el kernel PV" #: ../nova/virt/xenapi/vm_utils.py:397 #, python-format msgid "PV Kernel in VDI:%s" -msgstr "" +msgstr "Kernel PV en VDI:%s" #: ../nova/virt/xenapi/vm_utils.py:405 #, python-format msgid "Running pygrub against %s" -msgstr "" +msgstr "Ejecutando pygrub contra %s" #: ../nova/virt/xenapi/vm_utils.py:411 #, python-format msgid "Found Xen kernel %s" -msgstr "" +msgstr "Kernel Xen Encontrado %s" #: ../nova/virt/xenapi/vm_utils.py:413 msgid "No Xen kernel found. Booting HVM." 
-msgstr "" +msgstr "Kernel Xen no encontrado. Reiniciando HVM" #: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format @@ -864,7 +886,7 @@ msgstr "(VM_UTILS) xenapi power_state -> |%s|" #: ../nova/virt/xenapi/vm_utils.py:525 #, python-format msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "" +msgstr "VHD %(vdi_uuid)s tiene origen en %(parent_ref)s" #: ../nova/virt/xenapi/vm_utils.py:542 #, python-format @@ -893,18 +915,19 @@ msgstr "No se han encontrado VDI's para VM %s" #, python-format msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" +"Numero de VDIs inesperado (%(num_vdis)s) encontrados por VM %(vm_ref)s" #: ../nova/virt/xenapi/vm_utils.py:653 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format msgid "Creating VBD for VDI %s ... " -msgstr "" +msgstr "Creando VBD para VDI %s ... " #: ../nova/virt/xenapi/vm_utils.py:655 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format msgid "Creating VBD for VDI %s done." 
-msgstr "" +msgstr "Creando VBF para VDI %s terminado" #: ../nova/virt/xenapi/vm_utils.py:657 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 @@ -2850,12 +2873,12 @@ msgstr "" #: ../nova/api/ec2/admin.py:177 #, python-format msgid "Create project %(name)s managed by %(manager_user)s" -msgstr "" +msgstr "Crear proyecto %(name)s administrador por %(manager_user)s" #: ../nova/api/ec2/admin.py:190 #, python-format msgid "Modify project: %(name)s managed by %(manager_user)s" -msgstr "" +msgstr "Modificar proyecto: %(name)s administrado por %(manager_user)s" #: ../nova/api/ec2/admin.py:200 #, python-format @@ -2865,12 +2888,12 @@ msgstr "Borrar proyecto: %s" #: ../nova/api/ec2/admin.py:214 #, python-format msgid "Adding user %(user)s to project %(project)s" -msgstr "" +msgstr "Agregando usuario %(user)s al proyecto %(project)s" #: ../nova/api/ec2/admin.py:218 #, python-format msgid "Removing user %(user)s from project %(project)s" -msgstr "" +msgstr "Eliminando el usuario %(user)s del proyecto %(project)s" #, python-format #~ msgid "" diff --git a/po/fr.po b/po/fr.po new file mode 100644 index 000000000..9dd789b3c --- /dev/null +++ b/po/fr.po @@ -0,0 +1,2992 @@ +# French translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-05-27 16:50+0000\n" +"Last-Translator: Capashen <Unknown>\n" +"Language-Team: French <fr@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" + +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "Pas d'hôte trouvé" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Erreur imprévue lors de l'éxecution de la commande" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Commande : %(cmd)s\n" +"Valeur retournée : %(exit_code)s\n" +"Sortie standard : %(stdout)r\n" +"Sortie d'erreur : %(stderr)r" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "Remontée d'exception de la base de données" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Exception non prévue" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" +"Quota dépassé pour %(pid)s lors d'une tentative de création d'un volume de " +"%(size)sG" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %sG" +msgstr "Quota de volume dépassé. 
Vous ne pouvez pas créer un volume de %sG" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "Le status du volume doit être disponible" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "Volume déjà attaché" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "Volume déjà déttaché" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "Echec lors de la lecture de l'ip privée" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "Echec lors de la lecture de l'ip(s) privée(s)" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" +"La propriété %(param)s n'a pas été trouvée pour l'image %(_image_id)s" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "Pas de paire de clés définie" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: 
../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "Nombre d'arguments incorrect." + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" +"Le fichier pid %s n'existe pas. Est-ce que le processus est en cours " +"d'exécution ?\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "Aucun processus de ce type" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "En train de servir %s" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "Ensemble de propriétés complet :" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "Démarrage de %s" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "Instance %s non trouvée" + +#. 
NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" +"Impossible de créer VDI sur SR %(sr_ref)s pour l'instance %(instance_name)s" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" +"Impossible d'utiliser SR %(sr_ref)s pour l'instance %(instance_name)s" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Impossible d'attacher le volume à l'instance %s" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" +"Le point de montage %(mountpoint)s a été attaché à l'instance " +"%(instance_name)s" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Impossible de trouver le volume %s" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Impossible de détacher le volume %s" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" +"Le point de montage %(mountpoint)s à été détaché de l'instance " +"%(instance_name)s" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "Type d'instance inconnu: %s" + +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nom du fichier contenant la racine de l'autorité de certification" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nom de fichier de la clé privée" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Nom de fichier de la racine de liste de révocation (CRL)" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Emplacement de sauvegarde des clefs" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Emplacement de sauvegarde des racines d'autorité de certification" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Doit-on utiliser une autorité de certification pour chaque projet ?" 
+ +#: ../nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Sujet pour les certificats utilisateurs, %s pour le projet, utilisateur, " +"timestamp" + +#: ../nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Sujet de certificat pour projets, %s pour le projet, timestamp" + +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "Suject de certificat pour les vpns, %s pour le projet, timestamp" + +#: ../nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Chemin des propriétés: %s" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "Typage de %(topic)s %(host)s pour %(method)s" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: décoration : |%s|" + +#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" +"check_instance_lock: arguments : |%(self)s| |%(context)s| |%(instance_id)s|" + +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: vérouillé : |%s|" + +#: ../nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin : |%s|" + +#: ../nova/compute/manager.py:91 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: exécution : |%s|" + +#: ../nova/compute/manager.py:95 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: ne s'éxécute pas |%s|" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "L'instance a déjà été crée" + +#: ../nova/compute/manager.py:180 
+#, python-format +msgid "instance %s: starting..." +msgstr "L'instance %s: est en train d'être démarrée..." + +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "instance %s: n'a pas pu être créée" + +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "Arrêt de l'instance %s" + +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "Dé-allocation de l'adresse %s" + +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "Tentative de destruction d'une instance déjà détruite: %s" + +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "Redémarrage de l'instance %s" + +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" +"Tentative de redémarrage d'une instance non démarrée: %(instance_id)s " +"(state: %(state)s s'attendait à : %(running)s)" + +#: ../nova/compute/manager.py:311 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: ../nova/compute/manager.py:316 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" +"Tentative de création d'un instantané (snapshot) pour une instance non " +"démarrée: %(instance_id)s (state: %(state)s s'attendait à : %(running)s)" + +#: ../nova/compute/manager.py:332 +#, python-format +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" +"Tentative de ré-initialisation du mot de passe pour une instance non " +"démarrée: %(instance_id)s (state: %(instance_state)s expected: " 
+"%(expected_state)s)" + +#: ../nova/compute/manager.py:335 +#, python-format +msgid "instance %s: setting admin password" +msgstr "instance %s: configuration du mot de passe admin" + +#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" +"Tentative d'injection d'un fichier pour une instance non " +"démarrée:%(instance_id)s (state: %(instance_state)s s'attendait à : " +"%(expected_state)s)" + +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "instance %(nm)s: injection de fichier vers %(plain_path)s" + +#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" +msgstr "instance %s: récupération" + +#: ../nova/compute/manager.py:387 +#, python-format +msgid "instance %s: unrescuing" +msgstr "instance %s: dé-récupération" + +#: ../nova/compute/manager.py:406 +#, python-format +msgid "instance %s: pausing" +msgstr "instance %s: mise en pause" + +#: ../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: unpausing" +msgstr "instance %s: reprise après pause" + +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instance %s: récupération des diagnostiques" + +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "instance %s: suspension" + +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "instance %s: reprise après suspension" + +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "instance %s: vérrouillage" + +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "instance %s: déverrouillage" + +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" +msgstr 
"instance %s: récupération de l'état de vérouillage" + +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "instance %s: redémarrage du réseau" + +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "Récupération de la sortie de la console de l'instance %s" + +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "instance %s: préparation d'une console ajax" + +#: ../nova/compute/manager.py:553 +#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" +"instance %(instance_id)s: montage du volume %(volume_id)s à %(mountpoint)s" + +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. +#: ../nova/compute/manager.py:569 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" +"instance %(instance_id)s: Échec de montage %(mountpoint)s, supression" + +#: ../nova/compute/manager.py:585 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" +"Démontage du volume %(volume_id)s du point de montage %(mp)s sur l'instance " +"%(instance_id)s" + +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Démontage de volume d'une instance inconnue %s" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "Host %s n'est pas en fonction" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "Tous les hôtes ont trop de coeurs" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "Hôte %s non disponible" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many 
gigabytes" +msgstr "Tous les hôtes ont trop de mémoire" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "Tous les hôtes ont trop de réseaux" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Ré-exportation de %s volumes" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s : exportation évitée" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: création" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: création d'un volume logique de %(vol_size)sG" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: exportation en cours" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: créé avec succès" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "Le volume est encore attaché" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "Le volume n'est pas local à ce noeud" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: suppression de l'exportation" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: suppression" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: supprimé avec succès" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "Fonction non implémentée" + +#: 
../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake n'a pas d'implémentation pour %s" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Appel %(localname)s %(impl)s" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "Appel du getter %s" + +#: ../nova/virt/xenapi/fake.py:406 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake n'a pas d'implementation pour %s ou il a été appelé avec le " +"mauvais nombre d'arguments" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "Ne peut pas tester les instances sans un env virtuel." + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "Besoin de surveiller l'instance %s jusqu'à son démarrage..." + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "Échec lors de l'ouverture d'une connexion à l'hyperviseur" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Démarrage de l'interface VLAN %s" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Démarrage de l'interface de Bridge %s" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Hupping dnsmasq à renvoyé %s" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d est dépassé, re-démarrage de dnsmasq" + +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "La destruction de radvd à renvoyé %s" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d est dépassé, re-démarrage radvd" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "La destruction de dnsmasq à renvoyé %s" + +#: ../nova/utils.py:58 +#, python-format +msgid "Inner Exception: %s" +msgstr "Exception interne : %s" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "La classe %s n'a pas pu être trouvée" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "Récupèration de %s" + +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Execution de la commande (sous-processus) : %s" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "Le résultat était %s" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Execution de la cmd (SSH): %s" + +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" +msgstr "Debug dans le rappel : %s" + +#: ../nova/utils.py:222 +#, python-format +msgid "Running %s" +msgstr "Exécution de %s" + +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "L'adresse du lien local n'a pas été trouvé :%s" + +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Impossible de trouver l'IP du lien local de %(interface)s :%(ex)s" + +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend invalide : %s" + +#: ../nova/utils.py:374 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) 
%(message)s" +msgstr "(%(nm)s) publication (key: %(routing_key)s) %(message)s" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "Publication vers la route %s" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "Déclaration de la queue %s" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "Déclaration de l'échange %s" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" +"Rattachement de %(queue)s vers %(exchange)s avec la clef %(routing_key)s" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "Récupération depuis %(queue)s: %(message)s" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." +msgstr "VM %s crée..." + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "VM %(instance_name)s crée en tant que %(vm_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "Création de VBD pour VM %(vm_ref)s, VDI %(vdi_ref)s ... " + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "VBD créé %(vbd_ref)s pour VM %(vm_ref)s, VDI %(vdi_ref)s." 
+ +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD non trouvé dans l'instance %s" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Impossible de deconnecter le VBD %s" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Impossible de supprimer le VBD %s" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "Création du VIF pour la VM %(vm_ref)s, réseau %(network_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +msgstr "VIF créé %(vif_ref)s pour la VM %(vm_ref)s, network %(network_ref)s." + +#: ../nova/virt/xenapi/vm_utils.py:246 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" +"VDI créé %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" +"Création de l'instantané (snapshot) pour la VM %(vm_ref)s avec le label " +"'%(label)s'..." + +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" +"Instantané (snapshot) créé %(template_vm_ref)s pour la VM %(vm_ref)s." 
+ +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" +"Demande de chargement à xapi de %(vdi_uuids)s en tant qu'ID %(image_id)s" + +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "Taille de l'image %(image)s:%(virtual_size)d" + +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" +msgstr "Image Glance %s" + +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copie de VDI %s vers /boot/guest sur dom0" + +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Noyau/Ramdisk VDI %s détruit" + +#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "Demande de récupération à xapi de %(url)s as %(access)s" + +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Recherche du VDI %s pour le PV kernel" + +#: ../nova/virt/xenapi/vm_utils.py:397 +#, python-format +msgid "PV Kernel in VDI:%s" +msgstr "PV Kernel sur VDI :%s" + +#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" +msgstr "Exécution de pygrub sur %s" + +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Kernel Xen %s trouvé" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "Pas de kernel Xen trouvé. Démarrage en HVM." 
+ +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" +msgstr "Doublon de nom trouvé : %s" + +#: ../nova/virt/xenapi/vm_utils.py:442 +#, python-format +msgid "VDI %s is still available" +msgstr "Le VDI %s est toujours disponible" + +#: ../nova/virt/xenapi/vm_utils.py:463 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) état xenserver vm -> |%s|" + +#: ../nova/virt/xenapi/vm_utils.py:465 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s à pour parent %(parent_ref)s" + +#: ../nova/virt/xenapi/vm_utils.py:542 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-parcours de SR %s" + +#: ../nova/virt/xenapi/vm_utils.py:567 +#, python-format +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" +"VHD tentatives de coalesence dépassé (%(counter)d > %(max_attempts)d), " +"abandon..." + +#: ../nova/virt/xenapi/vm_utils.py:574 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" +"L'UUID parent %(parent_uuid)s ne correspond pas au parent originel " +"%(original_parent_uuid)s, attente de coalesence..." + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "Pas de VDIs trouvé pour la VM %s" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" +"Nombre de VDIs non attendu (%(num_vdis)s) trouvés pour la VM %(vm_ref)s" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... 
" +msgstr "Création de VBD pour la VDI %s ... " + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "La création de VBD pour la VDI %s est terminée." + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "Connexion de VBD %s ... " + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "Connexion de VBD %s terminée." + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "VBD %(vbd)s connecté en tant que %(orig_dev)s" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "VBD %(vbd)s connecté au mauvais device, re-connexion vers %(dev)s" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "Destruction de VBD pour la VDI %s ... " + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "Destruction de VBD pour la VDI %s terminée." + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "VBD.unplug terminé dés la première tentative." + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "VBD.unplug refusé : nouvel essai..." 
+ +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "VBD.unplug à enfin été achevée." + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "XenAPI.Failure ignorée dans VBD.unplug: %s" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "XenAPI.Failure %s ignorée" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" +"Ecriture de la table de partitionnement %(primary_first)d %(primary_last)d " +"vers %(dest)s..." + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." +msgstr "Ecriture de la table de partitionnement %s terminée." 
+ +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "Reception par Nested %(queue)s, %(value)s" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "Nested renvoi %s" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "%s Reçu" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "L'utilisation d'une requête de contexte vide est dévalué" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "Pas de service pour l'id %s" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "Pas de service pour %(host)s, %(binary)s" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "Aucune IP fixe définie" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "Pas d'IP flottante pour l'addresse %s" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "Pas d'adresse pour l'instance %s" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "Pas de bi-clef pour l'utilisation %(user_id)s, nommé %(name)s" + +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr "Pas de réseau pourl'id %s" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "Pas de réseau défini" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "Pas de réseau pour le bridge %s" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "Pas de réseau pour l'instance %s" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, 
python-format +msgid "Token %s does not exist" +msgstr "Le jeton %s n'existe pas" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota for project_id %s" +msgstr "Pas de quota pour l'ID projet %s" + +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "Volume %s non trouvé" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "Pas de device d'exportation pour le volume %s" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "Pas d'id de destination trouvée pour le volume %s" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "Aucun groupe de sécurité avec l'id %s" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "" +"Aucun groupe de sécurité nommé %(group_name)s pour le projet : %(project_id)s" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "Pas de groupe de sécurité ayant pour ID %s" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "Pas d'utilisateur ayant pour ID %s" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "Pas d'utilisateur avec la clef d'accès %s" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "Pas de projet ayant pour ID %s" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "Pas de groupe de console ayant pour ID %(pool_id)s" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr 
"" +"Pas de groupe de console de type %(console_type)s pour l'hote de calcul " +"%(compute_host)s sur le proxy %(host)s" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" +msgstr "" +"Pas de console pour l'intance %(instance_id)s dans le groupe %(pool_id)s" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "sur l'intance %s" + +#: ../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "Pas de console ayant pour ID %(console_id)s %(idesc)s" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" +msgstr "Pas de zone ayant pour ID %(zone_id)s" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "Vérification de l'état de %s" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "L'état de %(name)s est %(state)s." 
+ +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Connexion à libvirt: %s" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "Connexion à libvirt interrompue" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" +"instance %(instance_name)s: suppression des fichiers d'instance %(target)s" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "Chemin de device invalide %s" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "Pas de disque sur %s" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" +"Les instantanés (snapshot) d'instance ne sont pas disponible avec libvirt " +"pour le moment" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "instance %s: re-démarrée" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "_wait_for_reboot échouée : %s" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "instance %s: récupérée" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "_wait_for_rescue échouée : %s" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "instance %s: est active" + +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "instance %s: a démarrée" + +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 +#, python-format +msgid "instance %s: failed to boot" +msgstr "instance %s: n'a pas réussie à démarrer" + +#: ../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "virsh a retourné : %r" + +#: 
../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "super, c'est un device" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "data: %(data)r, fpath: %(fpath)r" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "Contenu du fichier %(fpath)s: %(contents)r" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "Impossible de trouver un port ouvert" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "instance %s : Création de l'image" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "instance %(inst_name)s : injection de clef dans l'image %(img_id)s" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "instance %(inst_name)s : injection de réseau dans l'image %(img_id)s" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" +"instance %(inst_name)s : l'erreur d'injection de donné dans l'image " +"%(img_id)s (%(e)s) a été ignorée" + +#. TODO(termie): cache? 
+#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "instance %s: démarrage de la méthode toXML" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "instance %s: fin d'éxécution de la méthode toXML" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "Les diagnostiques ne sont pas disponibles pour libvirt" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" +"Tentative de suppression de filtre pour l'intance %s qui n'est pas filtrée" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Impossible de récupérer les méta-donnérs pour l'IP : %s" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Tentative d'instanciation d'un singleton" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "Quota dépassé pour %s lors de la tentative d'allocation d'adresse" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "Quota d'adresse dépassé. Vous ne pouvez pas allouer d'autre adresse" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "Destination %s allouée" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "Fin de récupération de %(url)s -- placé dans %(path)s" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "Doit mettre en oeuvre un calendrier de retrait" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "Ajout de console" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." 
+msgstr "Tentative de suppression d'une console non existante %(console_id)s." + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "non disponible" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "Le bi-clef %s existe déjà " + +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 +#, python-format +msgid "Generating root CA: %s" +msgstr "Génération de la racine d'autorité de certification : %s" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "Création du bi-clef %s" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "Suppression du bi-clef %s" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s n'est pas un protocol ip valide" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "Interval de port invalide" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Révocation de groupe de sécurité %s" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "Pas assez de parametres pour contruire un règle valide." + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." +msgstr "Pas de règle pour les paramètres spécifiés." 
+ +#: ../nova/api/ec2/cloud.py:450 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Authorisation du groupe de sécurité %s" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Cette règle existe déjà dans le groupe %s" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "Création du groupe de sécurité %s" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "le groupe %s existe déjà " + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "Suppression du groupe de sécurité %s" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "Création d'un volume de %s Go" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" +"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant que " +"%(device)s" + +#: ../nova/api/ec2/cloud.py:629 +#, python-format +msgid "Detach volume %s" +msgstr "Dé-montage du volume %s" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "Allocation d'adresse" + +#: ../nova/api/ec2/cloud.py:766 +#, python-format +msgid "Release address %s" +msgstr "Désallocation de l'adresse %s" + +#: ../nova/api/ec2/cloud.py:771 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" +"Association de l'adresse %(public_ip)s avec l'instance %(instance_id)s" + +#: ../nova/api/ec2/cloud.py:780 +#, python-format +msgid "Disassociate address %s" +msgstr "Désassociation de l'adresse %s" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "Début de la destruction d'instance" + +#: ../nova/api/ec2/cloud.py:815 +#, python-format +msgid "Reboot instance %r" +msgstr "Re-démarrage de l'instance %r" + +#: ../nova/api/ec2/cloud.py:867 +#, 
python-format +msgid "De-registering image %s" +msgstr "Dé-enregitrement de l'image %s" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "Image %(image_location)s enregistré avec l'id %(image_id)s" + +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 +#, python-format +msgid "attribute not supported: %s" +msgstr "attribut non reconnu : %s" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "ID invalide : %s" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "Utilisateur ou groupe non spécifié" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "Seul le group \"tous\" est supporté" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "" +"le type d'opération (operation_type) doit être ajout (add) ou suppression " +"(remove)" + +#: ../nova/api/ec2/cloud.py:908 +#, python-format +msgid "Updating image %s publicity" +msgstr "Mis à jour de la publication de l'image %s" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" +msgstr "Utilisation de la configuration paste.deploy sur : %s" + +#: ../bin/nova-api.py:57 +#, python-format +msgid "No paste configuration for app: %s" +msgstr "Pas de configuration collé pour l'application : %s" + +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" +"App Config: %(api)s\n" +"%(config)r" + +#: ../bin/nova-api.py:64 +#, python-format +msgid "Running %s API" +msgstr "API %s en cours d'éxécution" + +#: ../bin/nova-api.py:69 +#, python-format +msgid "No known API applications configured in %s." +msgstr "Pas d'API d'applications connue configurée pour %s." 
+ +#: ../bin/nova-api.py:83 +#, python-format +msgid "Starting nova-api node (version %s)" +msgstr "Démarrage du noeud nova-api (version %s)" + +#: ../bin/nova-api.py:89 +#, python-format +msgid "No paste configuration found for: %s" +msgstr "Pas de configuration collée trouvée : %s" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 +#, python-format +msgid "Argument %(key)s value %(value)s is too short." +msgstr "La valeur %(value)s pour l'argument %(key)s est trop courte." + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 +#, python-format +msgid "Argument %(key)s value %(value)s contains invalid characters." +msgstr "" +"La valeur %(value)s pour l'argument %(key)s contient des caractères " +"interdits." + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 +#, python-format +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" +"La valeur %(value)s pour l'argument %(key)s débute par un trait d'union." + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 +#, python-format +msgid "Argument %s is required." +msgstr "L'argument %s est requis." + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." +msgstr "" +"La valeur %(value)s pour l'argument %(key)s n'est pas valide. Les valeurs " +"autorisées sont ['true', 'false']." + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." +msgstr "" +"La VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) a été créée sur " +"%(sr_ref)s." 
+ +#: ../nova/virt/xenapi/vmops.py:67 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "Tentative de création d'un nom non unique %s" + +#: ../nova/virt/xenapi/vmops.py:73 +#, python-format +msgid "instance %(name)s: not enough free memory" +msgstr "instance %(name)s: pas assez de mémoire" + +#: ../nova/virt/xenapi/vmops.py:148 +#, python-format +msgid "Starting VM %s..." +msgstr "Démarrage de la VM %s..." + +#: ../nova/virt/xenapi/vmops.py:151 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "La VM %(instance_name)s a créé %(vm_ref)s." + +#: ../nova/virt/xenapi/vmops.py:162 +#, python-format +msgid "Invalid value for onset_files: '%s'" +msgstr "Valeur interdite pour onset_files : '%s'" + +#: ../nova/virt/xenapi/vmops.py:167 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "Injection du chemin d'accès : '%s'" + +#: ../nova/virt/xenapi/vmops.py:180 +#, python-format +msgid "Instance %s: booted" +msgstr "Instance %s : démarée" + +#: ../nova/virt/xenapi/vmops.py:232 +#, python-format +msgid "Instance not present %s" +msgstr "Instance non présente %s" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "Début de création d'instantané (snapshot) pour la VM %s" + +#: ../nova/virt/xenapi/vmops.py:269 +#, python-format +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "Impossible de faire un instantané de %(vm_ref)s: %(exc)s" + +#: ../nova/virt/xenapi/vmops.py:280 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "Fin de l'instantané et du chargement de VM %s" + +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "La VM %(vm)s est déjà arrété, exctinction évitée." 
+ +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "Suppression des fichiers noyau/ramdisk" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "Fichiers noyau/ramdisk supprimés" + +#: ../nova/virt/xenapi/vmops.py:561 +#, python-format +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" +"DELAI DEPASSE : L'appel de %(method)s à depasser de délai de réponse " +"maximal. VM id=%(instance_id)s; args=%(strargs)s" + +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" +msgstr "" +"NON IMPLEMENTE : l'appel de %(method)s n'est pas pris en compte par l'agent. " +"VM id=%(instance_id)s; args=%(strargs)s" + +#: ../nova/virt/xenapi/vmops.py:569 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" +"L'appel de %(method)s à renvoyé l'erreur : %(e)s. 
VM id=%(instance_id)s; " +"args=%(strargs)s" + +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" +msgstr "Erreur OpenSSL : %s" + +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "Instance actives : %s" + +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "Après l'arrêt d'instances : %s" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "Patron du script à exécuter au boot d'instance cloudpipe" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "Réseau à passer à la configuration d'openvpn" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "Masque réseau à passer à la configuration d'openvpn" + +#: ../nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "Démarrage du VPN pour %s" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." +msgstr "python-migrate n'est pas installé. Fin d'éxécution." + +#: ../nova/image/s3.py:99 +#, python-format +msgid "Image %s could not be found" +msgstr "L'image %s n'a pas été trouvée" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "Trop d'erreur d'authentification" + +#: ../nova/api/ec2/__init__.py:131 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." +msgstr "" +"La clef d'accès %(access_key)s a rencontrée %(failures)d echecs " +"d'authentification et sera par conséquent vérouillée pour %(lock_mins)d " +"minutes." 
+ +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Echec d'authentification : %s" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "Requête authentifiée pour : %(uname)s:%(pname)s)" + +#: ../nova/api/ec2/__init__.py:207 +#, python-format +msgid "action: %s" +msgstr "action: %s" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "arg: %(key)s\t\tval: %(value)s" + +#: ../nova/api/ec2/__init__.py:281 +#, python-format +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" +"Requête non authorisé pour le controlleur=%(controller)s et " +"l'action=%(action)s" + +#: ../nova/api/ec2/__init__.py:314 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "\"Instance non trouvée\" remontée : %s" + +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "\"Volume non trouvé\" remonté : %s" + +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "\"Erreur API\" remontée : %s" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "\"Erreur inopinée\" remontée : %s" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Une erreur inopinée à eu lieu. Merci d'essayer votre requête à nouveau." 
+ +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "L'utilisateur %s existe déjà " + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "Le projet ne peut pas être créé car le manager %s n'existe pas" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "Le projet ne peut pas être créé car l'utiisateur %s n'existe pas" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "Le projet ne peut pas être créé car projet %s existe déjà " + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "Le projet ne peut pas être modifié car le manager %s n'existe pas" + +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Utilisateur \"%s\" non trouvé" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Projet %s non trouvé" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" +"Doit spécifier xenapi_connection_url, xenapi_connection_username (optionel), " +"et xenapi_connection_password pour utiliser connection_type=xenapi" + +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "Tâche [%(name)s] %(task)s état : succès %(result)s" + +#: ../nova/virt/xenapi_conn.py:317 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "Tâche [%(name)s] %(task)s état : %(status)s %(error_info)s" + +#: 
../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 +#, python-format +msgid "Got exception: %s" +msgstr "Reçu exception : %s" + +#: ../nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "mise à jour %s..." + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "erreur inopinée pendant la ise à jour" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "Ne peut pas récupérer blockstats pour \"%(disk)s\" sur \"%(iid)s\"" + +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "Ne peut pas récupérer ifstats pour \"%(interface)s\" sur \"%(iid)s\"" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "erreur inopinée pendant la connexion" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" +msgstr "Instance trouvée : %s" + +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" +"Requête API non supportée : controleur = %(controller)s, action = %(action)s" + +#: ../nova/api/openstack/__init__.py:55 +#, python-format +msgid "Caught error: %s" +msgstr "Erreur interceptée : %s" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "Inclusion des opérations d'admin dans l'API." 
+ +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "Reconstruction de la configuration xvp" + +#: ../nova/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "Ré-écriture de %s" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "Arrêt xvp" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "Démarrage xvp" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "Erreur au démarrage xvp : %s" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "Re-démarrage xvp" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "xvp non actif..." + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" +"L'erreur çi dessus peut indiquer que la base de données n'a pas été créée.\n" +"Veuillez créer une base de données en utilisant nova-manage sync db avant " +"d'utiliser cette commande." + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" +"Plus de réseaux disponibles. Si vous venez d'installer nova, vous devez " +"exécuter une commande telle que :\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" +"L'erreur çi dessus peut indiquer que la base de données de certificats n'a " +"pas été créée.\n" +"Veuillez créer une base de données en exécutant nova-api server sur cet hôte." 
+ +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "réseau" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "adresse IP" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "adresse MAC" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "nom du serveur" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "hôte" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "masque de réseau" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "adresse de départ" + +#: ../nova/virt/disk.py:69 +#, python-format +msgid "Failed to load partition: %s" +msgstr "Impossible de charger la partition: %s" + +#: ../nova/virt/disk.py:91 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Impossible de monter le système de fichier : %s" + +#: ../nova/virt/disk.py:124 +#, python-format +msgid "nbd device %s did not show up" +msgstr "Device nbd %s n'est pas apparu" + +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "Impossible de lier l'image au loopback : %s" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "Pas de device nbd libre" + +#: ../doc/ext/nova_todo.py:46 +#, python-format +msgid "%(filename)s, line %(line_info)d" +msgstr "%(filename)s, ligne %(line_info)d" + +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "Dans init host" + +#: ../nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "Tentative de création de vm en doublon %s" + +#: ../nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "Démarrage VM %s " + +#: ../nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "VM %s démarrée " + +#: ../nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "Erreur à l'activation VM : %s" + +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "Erreur de création VM %s" + +#: ../nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "Réglage de la mémoire sur VM %s..." + +#: ../nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "Réglage vcpus sur vm %s..." + +#: ../nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" +"Création du disque pour %(vm_name)s par liaison avec le fichier disque " +"%(vhdfile)s" + +#: ../nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "Impossible d'ajouter le disque à la VM %s" + +#: ../nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "Nouveau chemin d'accès pour le disque : %s" + +#: ../nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "Impossible d'ajouter le fichier vhd à la VM %s" + +#: ../nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "Disque créé pour %s" + +#: ../nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "Création de l'interface réseau pour %s " + +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "Erreur de création de port sur le vswitch externe" + 
+#: ../nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "Erreur de création de port pour %s" + +#: ../nova/virt/hyperv.py:276 +#, python-format +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "Le port switch %(vm_name)s a été créé sur le switch %(ext_path)s" + +#: ../nova/virt/hyperv.py:286 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "Impossible d'ajouter une interface réseau à la VM %s" + +#: ../nova/virt/hyperv.py:288 +#, python-format +msgid "Created nic for %s " +msgstr "Interface réseau créée pour %s " + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "Tâche WMI echouée : %s" + +#: ../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "Tâche WMI réussie : %(desc)s, Elapsed=%(elap)s " + +#: ../nova/virt/hyperv.py:361 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "Requête de suppression de la VM %s reçue" + +#: ../nova/virt/hyperv.py:386 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "Impossible de supprimer la VM %s" + +#: ../nova/virt/hyperv.py:393 +#, python-format +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "Suppression: disque %(vhdfile)s vm %(instance_name)s" + +#: ../nova/virt/hyperv.py:415 +#, python-format +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" +"Informations reçues pour la VM %(instance_id)s: état=%(state)s, " +"mem=%(memusage)s, num_cpu=%(numprocs)s, cpu_time=%(uptime)s" + +#: ../nova/virt/hyperv.py:451 +#, python-format +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "Changement d'état de la VM %(vm_name)s en %(req_state)s réussi" + +#: ../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "Impossible de changer l'état de la vm %(vm_name)s en 
%(req_state)s" + +#: ../nova/compute/api.py:71 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "Instance %d non trouvée dans get_network_topic" + +#: ../nova/compute/api.py:77 +#, python-format +msgid "Instance %d has no host" +msgstr "Instance %d n'a pas d'hôte" + +#: ../nova/compute/api.py:97 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" +"Quota dépassé pour %(pid)s, lors de la tentative d'exécution de " +"%(min_count)s instances" + +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" +"Quota d'instances dépassé. Vous ne pouvez éxécuter que %s instances de ce " +"type de plus." + +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "Création d'une instance raw" + +#: ../nova/compute/api.py:160 +#, python-format +msgid "Going to run %s instances..." +msgstr "Démarrage de %s instances..." + +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "Envoi au scheduler de %(pid)s/%(uid)s's instance %(instance_id)s" + +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" +msgstr "Va essayer d'arrêter %s" + +#: ../nova/compute/api.py:296 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "L'instance %d n'a pas été trouvée durant l'arrêt" + +#: ../nova/compute/api.py:301 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "L'instance %d est déjà en cours d'arrêt" + +#: ../nova/compute/api.py:481 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "Device spécifié invalide : %s. Exemple de device: /dev/vdb" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "Le volume n'est pas attaché à quoi que ce soit!" 
+ +#: ../nova/rpc.py:98 +#, python-format +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" +"Le serveur AMQP sur %(fl_host)s:%(fl_port)d n'est pas joignable. Nouvel " +"essai dans %(fl_intv)d secondes." + +#: ../nova/rpc.py:103 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Impossible de se connecter au serveur AMQP après %d essais. Extinction." + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Reconnection à la queue" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "Impossible de récuperrer les message de la queue" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "Initialisation du Consomateur d'Adapteur pour %s" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "%s reçu" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Renvoi de l'exception %s à l'appelant" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "Contexte décompacté : %s" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "En cours d'appel assynchrone..." + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID est %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." 
+msgstr "En cours de diffusion assynchrone" + +#: ../nova/rpc.py:364 +#, python-format +msgid "response %s" +msgstr "réponse %s" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "le sujet est %s" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "message %s" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Récupération après une exécution erronée. Tentative numéro %s" + +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "FAUX AOE: %s" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "ensure_export sauté. Pas d'iscsi_target " + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "remove_export sauté. Pas d'iscsi_target " + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAUX ISCSI: %s" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd n'as pas de file %s" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog n'est pas actif : %s" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "Sheepdog n'est pas actif" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "Démarrage %(arg0)s sur %(host)s:%(port)s" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "Vous devez implémenter __call__" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "Démarrage du superviseur d'instance" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "Allocation IP" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of 
mac/hostname" +msgstr "Allocation périmée accepté ou changement de nom/adresse MAC d'hôte" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "Libération IP" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" +"%(action)s appelée pour MAC %(mac)s avec IP %(ip)s nom d'hôte %(hostname)s " +"sur l'interface %(interface)s" + +#: ../nova/virt/fake.py:239 +#, python-format +msgid "Instance %s Not Found" +msgstr "Instance %s Non trouvée" + +#: ../nova/network/manager.py:153 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "Désassociation de %s ip(s) fixes périmées" + +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "réglage de l'hôte réseau" + +#: ../nova/network/manager.py:212 +#, python-format +msgid "Leasing IP %s" +msgstr "Allocation IP %s" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "IP %s allouée qui n'est pas associée" + +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "IP %(address)s allouée à une mauvaise MAC %(inst_addr)s pour %(mac)s" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "IP %s alloué qui était déjà désallouée" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "Libération IP %s" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "IP %s libérée qui n'était pas associée" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" +"IP %(address)s libérée par une mauvaise MAC %(inst_addr)s pour %(mac)s" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was 
not leased" +msgstr "IP %s libérée qui n'était pas allouée" + +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" +"La somme du nombre de réseau et le début de vlan ne peut excéder 4094" + +#: ../nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "Introduction de %s" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "%(label)s introduit comme %(sr_ref)s." + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "Impossible de créer le dépot de stockage" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "Démémorisation du SR %s " + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" +"Exception %(exc)s ignorée pendant l'obtention de PBDs pour %(sr_ref)s" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "Exception %(exc)s ignorée pendant la deconnexion du PBD %(pbd)s" + +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "Démémorisation du SR %s terminée" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "Exception %(exc)s ignorée pendant la démémorisation du SR %(sr_ref)s" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "Impossible d'introduire VDI sur SR %s" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "Impossible de récuppérer l'enregistrement du VDI %s sur" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "Impossible d'introduire le VDI pour SR %s" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" +"Impossible de récuppérer les informations de destination %(device_path)s, " +"%(mountpoint)s" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "Le point de montage ne peut pas être traduit : %s" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "Impossible de déchiffrer la clef privée : %s" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "Impossible de déchiffrer le vecteur d'initialisation : %s" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "Impossible de déchiffrer le fichier image %(image_file)s: %(err)s" + +#: ../nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "Type de valeur S3 inconnu %r" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "Requête authentifiée" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" 
+msgstr "Listes de conteneurs (buckets) demandée" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "List des clefs pour le conteneur (bucket) %s" + +#: ../nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "Demande d'accès au conteneur %s non autorisé" + +#: ../nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "Création du conteneur %s" + +#: ../nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "Suppression du conteneur %s" + +#: ../nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "Tentative de suppression du conteneur %s non autorisée" + +#: ../nova/objectstore/handler.py:273 +#, python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "Récupération de l'objet : %(bname)s / %(nm)s" + +#: ../nova/objectstore/handler.py:276 +#, python-format +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" +"Tentative d'accès à l'objet %(nm)s non autorisée pour le conteneur %(bname)s" + +#: ../nova/objectstore/handler.py:296 +#, python-format +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "Injection d'objet : %(bname)s / %(nm)s" + +#: ../nova/objectstore/handler.py:299 +#, python-format +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" +"Tentative de chargement de l'objet %(nm)s vers le conteneur %(bname)s non " +"autorisée" + +#: ../nova/objectstore/handler.py:318 +#, python-format +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "Suppression de l'objet : %(bname)s / %(nm)s" + +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" +"Tentative de suppression de l'objet %(nm)s non autorisé pour le conteneur " +"%(bname)s" + +#: ../nova/objectstore/handler.py:396 +#, 
python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "Chargement d'image non autorisé : répertoire invalide %s" + +#: ../nova/objectstore/handler.py:404 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "Chargement d'image non autorisé : conteneur non autorisé %s" + +#: ../nova/objectstore/handler.py:409 +#, python-format +msgid "Starting image upload: %s" +msgstr "Début de chargement d'image: %s" + +#: ../nova/objectstore/handler.py:423 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "Tentative de mise a jour d'attributs non autorisée pour l'image %s" + +#: ../nova/objectstore/handler.py:431 +#, python-format +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" +"Basculement de l'attribut public de l'image %(image_id)s %(newstatus)r" + +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 +#, python-format +msgid "Updating user fields on image %s" +msgstr "Mise à jour des champs utilisateurs sur l'image %s" + +#: ../nova/objectstore/handler.py:450 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "Tentative de suppression de l'image %s non autorisée" + +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" +msgstr "Image supprimée : %s" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "Recherche de l'utilisateur : %r" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Autorisation refusée pour la clef d'accès %s" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "Pas d'utilisateur trouvé pour la clef d'accès %s" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Nom de projet utilisé = nom d'utilisateur (%s)" + +#: ../nova/auth/manager.py:277 +#, python-format 
+msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" +"Autorisation refusée : pas de projet nommé %(pjid)s (utilisateur=%(uname)s)" + +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "Aucun projet nommé %s trouvé" + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" +"Autorisation refusée : utilisateur %(uname)s n'est ni admin ni membre du " +"projet %(pjname)s" + +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "L'utilisateur %(uid)s n'est pas membre du projet %(pjid)s" + +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Signature non valide pour l'utilisateur %s" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "La signature ne correspond pas" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "Le projet doit être spécifié" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "Le rôle %s n'a pas été trouvé" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "Le rôle %s est obligatoirement global" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" +"Ajout du rôle %(role)s à l'utilisateur %(uid)s pour le projet %(pid)s" + +#: ../nova/auth/manager.py:423 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "Ajout du rôle global %(role)s pour l'utilisateur %(uid)s" + +#: ../nova/auth/manager.py:448 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" +"Suppression du rôle %(role)s pour l'utilisateur %(uid)s dans le projet " 
+"%(pid)s" + +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "Suppression du role global %(role)s pour l'utilisateur %(uid)s" + +#: ../nova/auth/manager.py:515 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "Création du projet %(name)s ayant pour manager %(manager_user)s" + +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "modification du projet %s" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "Ajout de l'utilisateur %(uid)s au projet %(pid)s" + +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "Suppression de l'utilisateur %(uid)s du projet %(pid)s" + +#: ../nova/auth/manager.py:592 +#, python-format +msgid "Deleting project %s" +msgstr "Suppression du projet %s" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "Utilisateur créé %(rvname)s (admin: %(rvadmin)r)" + +#: ../nova/auth/manager.py:659 +#, python-format +msgid "Deleting user %s" +msgstr "Suppression de l'utilisateur %s" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" +msgstr "Clef d'accès changée pour l'utilisateur %s" + +#: ../nova/auth/manager.py:671 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Clef secrète changée pour l'utilisateur %s" + +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "Statut admin changé en %(admin)r pour l'utilisateur %(uid)s" + +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" +msgstr "Pas de données VPN pour le projet %s" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "Démarrage du noeud %(topic)s (version %(vcs_string)s)" 
+ +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "Service détruit sans entrée dans la base de données" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" +"L'objet du service de base de données à disparru, re-création en cours." + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "Récupération du modelle de connexion serveur terminée!" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "Le modèle de serveur à disparu" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" +msgstr "L'utilisateur LDAP %s existe déjà " + +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "L'objet LDAP pour %s n'existe pas" + +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" +msgstr "L'utilisateur %s n'existe pas" + +#: ../nova/auth/ldapdriver.py:472 +#, python-format +msgid "Group can't be created because group %s already exists" +msgstr "Un groupe nommé %s existe déjà " + +#: ../nova/auth/ldapdriver.py:478 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "Le groupe n'a pu être créé car l'utilisateur %s n'existe pas" + +#: ../nova/auth/ldapdriver.py:495 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" +"L'utilisateur %s ne peut pas être trouvé dans le groupe car cet utilisateur " +"n'existe pas" + +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" +"L'utilisateur %s ne peut être ajouté au groupe car cet utilisateur n'existe " +"pas" + +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" +msgstr "Le group ayant pour dn %s n'existe pas" + +#: ../nova/auth/ldapdriver.py:513 +#, 
python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "L'utilisateur %(uid)s est déjà membre du groupe %(group_dn)s" + +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" +msgstr "" +"L'utilisateur %s ne peut être supprimé du groupe car cet utilisateur " +"n'existe pas" + +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" +msgstr "L'utilisateur %s n'est pas membre du groupe" + +#: ../nova/auth/ldapdriver.py:542 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Tentative de suppression du dernier membre d'un groupe. Essayez plutôt de " +"supprimer le group sur %s." + +#: ../nova/auth/ldapdriver.py:549 +#, python-format +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" +"L'utilisateur %s ne peut être supprimé partout car l'utilisateur n'existe pas" + +#: ../nova/auth/ldapdriver.py:564 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "Le groupe ayant pour dn %s n'existe pas" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Réseau non unique trouvé pour le bridge %s" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "Aucun réseau trouvé pour le bridge %s" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "Création d'un nouvel utilisateur : %s" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "Suppression de l'utilisateur : %s" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" +"Ajout du rôle %(role)s à l'utilisateur %(user)s pour le projet %(project)s" + +#: 
../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "Ajout du rôle global %(role)s à l'utilisateur %(user)s" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" +"Suppresion du rôle %(role)s de l'utilisateur %(user)s pour le projet " +"%(project)s" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "Suppression du rôle global %(role)s pour l'utilisateur %(user)s" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "L'opération doit être ajout ou suppression" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" +"Récupération de x509 pour l'utilisateur : %(name)s sur le projet : " +"%(project)s" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "Création du projet %(name)s géré par %(manager_user)s" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "Modification du projet: %(name)s géré par %(manager_user)s" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "Supprimer le projet : %s" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "Ajout de l'utilisateur %(user)s au projet %(project)s" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "Suppression de l'utilisateur %(user)s du projet %(project)s" @@ -14,8 +14,8 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n" -"X-Generator: Launchpad (build 
12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -8,20 +8,20 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-03-29 07:27+0000\n" -"Last-Translator: Koji Iida <Unknown>\n" +"PO-Revision-Date: 2011-05-10 10:26+0000\n" +"Last-Translator: Akira YOSHIYAMA <Unknown>\n" "Language-Team: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-30 05:22+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 #: ../nova/scheduler/simple.py:122 msgid "No hosts found" -msgstr "é©åˆ‡ãªãƒ›ã‚¹ãƒˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" +msgstr "ホストãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: ../nova/exception.py:33 msgid "Unexpected error while running command." @@ -36,10 +36,15 @@ msgid "" "Stdout: %(stdout)r\n" "Stderr: %(stderr)r" msgstr "" +"%(description)s\n" +"コマンド: %(cmd)s\n" +"終了コード: %(exit_code)s\n" +"標準出力: %(stdout)r\n" +"標準エラー出力: %(stderr)r" #: ../nova/exception.py:107 msgid "DB exception wrapped" -msgstr "" +msgstr "DB 例外ãŒãƒ©ãƒƒãƒ—(wrapped)ã•れã¾ã—ãŸ" #. 
exc_type, exc_value, exc_traceback = sys.exc_info() #: ../nova/exception.py:120 @@ -49,32 +54,32 @@ msgstr "ã‚ャッãƒã•れãªã‹ã£ãŸä¾‹å¤–" #: ../nova/volume/api.py:45 #, python-format msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" -msgstr "サイズ %(size)sG ã®ãƒœãƒªãƒ¥ãƒ¼ãƒ ã®ä½œæˆã‚’行ãŠã†ã¨ã—ã¾ã—ãŸãŒã€%(pid)sã®ã‚¯ã‚ªãƒ¼ã‚¿ã‚’è¶…ãˆã¦ã„ã¾ã™ã€‚" +msgstr "サイズ %(size)s GB ã®ãƒœãƒªãƒ¥ãƒ¼ãƒ 作æˆã‚’行ãŠã†ã¨ã—ã¾ã—ãŸãŒã€%(pid)s ã®ã‚µã‚¤ã‚ºåˆ¶é™ã‚’è¶…ãˆã¦ã„ã¾ã™ã€‚" #: ../nova/volume/api.py:47 #, python-format msgid "Volume quota exceeded. You cannot create a volume of size %sG" -msgstr "ボリュームã®ã‚¯ã‚ªãƒ¼ã‚¿ã‚’è¶…ãˆã¦ã„ã¾ã™ã€‚%sã®å¤§ãã•ã®ãƒœãƒªãƒ¥ãƒ¼ãƒ ã¯ä½œæˆã§ãã¾ã›ã‚“。" +msgstr "ボリュームã®ã‚µã‚¤ã‚ºåˆ¶é™ã‚’è¶…ãˆã¦ã„ã¾ã™ã€‚サイズ %s GB ã®ãƒœãƒªãƒ¥ãƒ¼ãƒ ã¯ä½œæˆã§ãã¾ã›ã‚“。" #: ../nova/volume/api.py:71 ../nova/volume/api.py:96 msgid "Volume status must be available" -msgstr "ボリュームã®ã‚¹ãƒ†ãƒ¼ã‚¿ã‚¹(status)㌠available ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" +msgstr "ボリュームã®ã‚¹ãƒ†ãƒ¼ã‚¿ã‚¹(status)㯠available ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" #: ../nova/volume/api.py:98 msgid "Volume is already attached" -msgstr "ãƒœãƒªãƒ¥ãƒ¼ãƒ ã¯æ—¢ã«ã‚¢ã‚¿ãƒƒãƒã•れã¦ã„ã¾ã™(attached)。" +msgstr "ãƒœãƒªãƒ¥ãƒ¼ãƒ ã¯æ—¢ã«æŽ¥ç¶š(attached)ã•れã¦ã„ã¾ã™ã€‚" #: ../nova/volume/api.py:104 msgid "Volume is already detached" -msgstr "ãƒœãƒªãƒ¥ãƒ¼ãƒ ã¯æ—¢ã«ãƒ‡ã‚¿ãƒƒãƒã•れã¦ã„ã¾ã™(detached)。" +msgstr "ãƒœãƒªãƒ¥ãƒ¼ãƒ ã¯æ—¢ã«åˆ‡æ–(detached)ã•れã¦ã„ã¾ã™ã€‚" #: ../nova/api/openstack/servers.py:72 msgid "Failed to read private ip" -msgstr "" +msgstr "プライベート IP アドレスã®ãƒªãƒ¼ãƒ‰ã«å¤±æ•—ã—ã¾ã—ãŸ" #: ../nova/api/openstack/servers.py:79 msgid "Failed to read public ip(s)" -msgstr "" +msgstr "パブリック IP アドレスã®ãƒªãƒ¼ãƒ‰ã«å¤±æ•—ã—ã¾ã—ãŸ" #: ../nova/api/openstack/servers.py:152 #, python-format @@ -83,7 +88,7 @@ msgstr "イメージ %(_image_id)s ã«å¯¾ã™ã‚‹ãƒ—ãƒãƒ‘ティ %(param)s ãŒè¦‹ã #: ../nova/api/openstack/servers.py:168 msgid "No keypairs defined" -msgstr "ã‚ーペアãŒå®šç¾©ã•れã¦ã„ã¾ã›ã‚“。" +msgstr "ã‚ーペアãŒå®šç¾©ã•れã¦ã„ã¾ã›ã‚“" #: ../nova/api/openstack/servers.py:238 
#, python-format @@ -103,7 +108,7 @@ msgstr "例外: Compute.api::get_lock %s" #: ../nova/api/openstack/servers.py:281 #, python-format msgid "Compute.api::reset_network %s" -msgstr "" +msgstr "例外: Compute.api::reset_network %s" #: ../nova/api/openstack/servers.py:292 #, python-format @@ -150,7 +155,7 @@ msgstr "FLAGSã®ä¸€è¦§:" #: ../nova/twistd.py:266 #, python-format msgid "Starting %s" -msgstr "%s ã‚’é–‹å§‹ã—ã¾ã™ã€‚" +msgstr "%s ã‚’èµ·å‹•ä¸" #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 @@ -163,33 +168,33 @@ msgstr "インスタンス %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: ../nova/virt/xenapi/volumeops.py:51 #, python-format msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" -msgstr "" +msgstr "ボリューム接続: %(instance_name)s, %(device_path)s, %(mountpoint)s" #: ../nova/virt/xenapi/volumeops.py:69 #, python-format msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" -msgstr "" +msgstr "インスタンス %(instance_name)s 用ã®SR %(sr_ref)s ã«ãŠã‘ã‚‹ VDI を作æˆã§ãã¾ã›ã‚“" #: ../nova/virt/xenapi/volumeops.py:80 #, python-format msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" -msgstr "" +msgstr "インスタンス %(instance_name)s 用ã®SR %(sr_ref)s ãŒä½¿ç”¨ã§ãã¾ã›ã‚“" #: ../nova/virt/xenapi/volumeops.py:91 #, python-format msgid "Unable to attach volume to instance %s" -msgstr "インスタンス %s ã«ãƒœãƒªãƒ¥ãƒ¼ãƒ をアタッãƒã§ãã¾ã›ã‚“。" +msgstr "インスタンス %s ã«ãƒœãƒªãƒ¥ãƒ¼ãƒ を接続(attach)ã§ãã¾ã›ã‚“。" #: ../nova/virt/xenapi/volumeops.py:93 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -msgstr "インスタンス %(instance_name)s ã«ãƒžã‚¦ãƒ³ãƒˆãƒã‚¤ãƒ³ãƒˆ %(mountpoint)s をアタッãƒã—ã¾ã—ãŸã€‚" +msgstr "インスタンス %(instance_name)s ã«ãƒžã‚¦ãƒ³ãƒˆãƒã‚¤ãƒ³ãƒˆ %(mountpoint)s を接続(attach)ã—ã¾ã—ãŸ" #. 
Detach VBD from VM #: ../nova/virt/xenapi/volumeops.py:104 #, python-format msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" -msgstr "" +msgstr "ボリューム切æ–: %(instance_name)s, %(mountpoint)s" #: ../nova/virt/xenapi/volumeops.py:112 #, python-format @@ -199,17 +204,17 @@ msgstr "ボリューム%s ã®å˜åœ¨ãŒç¢ºèªã§ãã¾ã›ã‚“。" #: ../nova/virt/xenapi/volumeops.py:120 #, python-format msgid "Unable to detach volume %s" -msgstr "ボリューム%s ã®ãƒ‡ã‚¿ãƒƒãƒãŒã§ãã¾ã›ã‚“。" +msgstr "ボリューム%s を切æ–(detach)ã§ãã¾ã›ã‚“" #: ../nova/virt/xenapi/volumeops.py:127 #, python-format msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -msgstr "インスタンス %(instance_name)s ã‹ã‚‰ãƒžã‚¦ãƒ³ãƒˆãƒã‚¤ãƒ³ãƒˆ %(mountpoint)s をデタッãƒã—ã¾ã—ãŸã€‚" +msgstr "インスタンス %(instance_name)s ã‹ã‚‰ãƒžã‚¦ãƒ³ãƒˆãƒã‚¤ãƒ³ãƒˆ %(mountpoint)s を切æ–(detach)ã—ã¾ã—ãŸ" #: ../nova/compute/instance_types.py:41 #, python-format msgid "Unknown instance type: %s" -msgstr "%s ã¯æœªçŸ¥ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã‚¿ã‚¤ãƒ—ã§ã™ã€‚" +msgstr "未知ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã‚¿ã‚¤ãƒ—: %s" #: ../nova/crypto.py:46 msgid "Filename of root CA" @@ -258,7 +263,7 @@ msgstr "Flags ã®ãƒ‘ス: %s" #: ../nova/scheduler/manager.py:69 #, python-format msgid "Casting to %(topic)s %(host)s for %(method)s" -msgstr "" +msgstr "%(method)s 用㫠%(topic)s %(host)s を割り当ã¦ä¸" #: ../nova/compute/manager.py:78 #, python-format @@ -269,7 +274,7 @@ msgstr "check_instance_lock: decorating: |%s|" #, python-format msgid "" "check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" -msgstr "" +msgstr "check_instance_lock: 引数: |%(self)s| |%(context)s| |%(instance_id)s|" #: ../nova/compute/manager.py:84 #, python-format @@ -298,23 +303,23 @@ msgstr "ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã¯æ—¢ã«ç”Ÿæˆã•れã¦ã„ã¾ã™ã€‚" #: ../nova/compute/manager.py:180 #, python-format msgid "instance %s: starting..." -msgstr "インスタンス %s ã‚’é–‹å§‹ã—ã¾ã™ã€‚" +msgstr "インスタンス %s: èµ·å‹•ä¸â€¦" #. 
pylint: disable=W0702 #: ../nova/compute/manager.py:219 #, python-format msgid "instance %s: Failed to spawn" -msgstr "インスタンス %s ã®èµ·å‹•ã«å¤±æ•—ã—ã¾ã—ãŸã€‚" +msgstr "インスタンス %s: èµ·å‹•ã«å¤±æ•—ã—ã¾ã—ãŸ" #: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format msgid "Terminating instance %s" -msgstr "Terminating instance: インスタンス %s を終了ã—ã¾ã™ã€‚" +msgstr "インスタンス %s ã‚’åœæ¢ä¸" #: ../nova/compute/manager.py:255 #, python-format msgid "Deallocating address %s" -msgstr "アドレス %s ã®å‰²å½“を解除(deallocate)ã—ã¾ã™ã€‚" +msgstr "アドレス %s ã®å‰²å½“を解除ä¸(deallocating)" #: ../nova/compute/manager.py:268 #, python-format @@ -338,7 +343,7 @@ msgstr "" #: ../nova/compute/manager.py:311 #, python-format msgid "instance %s: snapshotting" -msgstr "snapshotting: インスタンス %s ã®ã‚¹ãƒŠãƒƒãƒ—ショットをå–å¾—ã—ã¾ã™ã€‚" +msgstr "snapshotting: インスタンス %s ã®ã‚¹ãƒŠãƒƒãƒ—ショットをå–å¾—ä¸" #: ../nova/compute/manager.py:316 #, python-format @@ -361,7 +366,7 @@ msgstr "" #: ../nova/compute/manager.py:335 #, python-format msgid "instance %s: setting admin password" -msgstr "インスタンス %s: admin password をセットã—ã¾ã™" +msgstr "インスタンス %s: 管ç†è€…用パスワードをセットä¸" #: ../nova/compute/manager.py:353 #, python-format @@ -375,7 +380,7 @@ msgstr "" #: ../nova/compute/manager.py:362 #, python-format msgid "instance %(nm)s: injecting file to %(plain_path)s" -msgstr "インスタンス %(nm)s: ファイルを %(plain_path)s ã«ã‚¤ãƒ³ã‚¸ã‚§ã‚¯ãƒˆã—ã¾ã™" +msgstr "インスタンス %(nm)s: ファイルを %(plain_path)s ã«åŸ‹ã‚è¾¼ã¿ä¸" #: ../nova/compute/manager.py:372 #, python-format @@ -447,7 +452,7 @@ msgstr "インスタンス %s: ajax consoleを接続ã—ã¾ã™" msgid "" "instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -"インスタンス %(instance_id)s: ボリューム%(volume_id)s ã‚’ %(mountpoint)s ã«ã‚¢ã‚¿ãƒƒãƒã—ã¾ã™" +"インスタンス %(instance_id)s: ボリューム%(volume_id)s ã‚’ %(mountpoint)s ã«æŽ¥ç¶šä¸(attaching)" #. pylint: disable=W0702 #. 
NOTE(vish): The inline callback eats the exception info so we @@ -549,7 +554,7 @@ msgstr "ボリューム%s ã®å‰Šé™¤ã«æˆåŠŸã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/fake.py:74 #, python-format msgid "%(text)s: _db_content => %(content)s" -msgstr "" +msgstr "%(text)s: _db_content => %(content)s" #: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 #: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 @@ -616,12 +621,12 @@ msgstr "Pid %d ã¯ç„¡åйã§ã™ã€‚dnsmasqã‚’å†å®Ÿè¡Œã—ã¾ã™ã€‚" #: ../nova/network/linux_net.py:358 #, python-format msgid "killing radvd threw %s" -msgstr "" +msgstr "radvd åœæ¢ãŒ %s 例外を発行ã—ã¾ã—ãŸ" #: ../nova/network/linux_net.py:360 #, python-format msgid "Pid %d is stale, relaunching radvd" -msgstr "" +msgstr "Pid %d ãŒã‚¹ãƒˆãƒ¼ãƒ«ã—ã¦ã„ã‚‹ã®ã§ radvd ã‚’å†å®Ÿè¡Œã—ã¦ã„ã¾ã™â€¦" #. pylint: disable=W0703 #: ../nova/network/linux_net.py:449 @@ -677,7 +682,7 @@ msgstr "リンクãƒãƒ¼ã‚«ãƒ«ã‚¢ãƒ‰ãƒ¬ã‚¹ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“: %s" #: ../nova/utils.py:265 #, python-format msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" -msgstr "" +msgstr "%(interface)s ã®ãƒãƒ¼ã‚«ãƒ«IPアドレスã®ãƒªãƒ³ã‚¯ãŒå–å¾—ã§ãã¾ã›ã‚“:%(ex)s" #: ../nova/utils.py:363 #, python-format @@ -692,7 +697,7 @@ msgstr "ãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰ã¯ %s ã§ã™ã€‚" #: ../nova/fakerabbit.py:49 #, python-format msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" -msgstr "" +msgstr "(%(nm)s) 公開 (ã‚ー: %(routing_key)s) %(message)s" #: ../nova/fakerabbit.py:54 #, python-format @@ -712,12 +717,12 @@ msgstr "exchange %s ã®å®£è¨€" #: ../nova/fakerabbit.py:96 #, python-format msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" -msgstr "" +msgstr "ã‚ー %(routing_key)s 付ãã§ %(exchange)s ã« %(queue)s ã‚’ãƒã‚¤ãƒ³ãƒ‰ã—ã¦ã„ã¾ã™" #: ../nova/fakerabbit.py:121 #, python-format msgid "Getting from %(queue)s: %(message)s" -msgstr "" +msgstr "%(queue)s ã‹ã‚‰å–å¾—ã—ã¦ã„ã¾ã™: %(message)s" #: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format @@ -727,17 +732,17 @@ msgstr "VM %s 
を作æˆã—ã¾ã™ã€‚" #: ../nova/virt/xenapi/vm_utils.py:138 #, python-format msgid "Created VM %(instance_name)s as %(vm_ref)s." -msgstr "" +msgstr "%(vm_ref)s ã¨ã—㦠VM %(instance_name)s を作æˆã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/vm_utils.py:168 #, python-format msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " -msgstr "" +msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用 VBD を作æˆã—ã¦ã„ã¾ã™â€¦ " #: ../nova/virt/xenapi/vm_utils.py:171 #, python-format msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." -msgstr "" +msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用仮想ブãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(VBD) %(vbd_ref)s を作æˆã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:187 #, python-format @@ -757,12 +762,12 @@ msgstr "VBD %s ã®å‰Šé™¤ã«å¤±æ•—ã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:224 #, python-format msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." -msgstr "" +msgstr "VM %(vm_ref)s, network %(network_ref)s 用仮想インターフェース(VIF)を作æˆã—ã¦ã„ã¾ã™ã€‚" #: ../nova/virt/xenapi/vm_utils.py:227 #, python-format msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." -msgstr "" +msgstr "VM %(vm_ref)s, network %(network_ref)s 用 VIF %(vif_ref)s を作æˆã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:246 #, python-format @@ -770,50 +775,52 @@ msgid "" "Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " "%(sr_ref)s." msgstr "" +"%(sr_ref)s 上㫠VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, " +"%(read_only)s) を作æˆã—ã¾ã—ãŸã€‚" #. TODO(sirp): Add quiesce and VSS locking support when Windows support #. is added #: ../nova/virt/xenapi/vm_utils.py:258 #, python-format msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." -msgstr "" +msgstr "ラベル '%(label)s' 付ã VM %(vm_ref)s ã®ã‚¹ãƒŠãƒƒãƒ—ショットを作æˆã—ã¦ã„ã¾ã™â€¦" #: ../nova/virt/xenapi/vm_utils.py:272 #, python-format msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
-msgstr "" +msgstr "VM %(vm_ref)s ã‹ã‚‰ã‚¹ãƒŠãƒƒãƒ—ショット %(template_vm_ref)s を作æˆã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:286 #, python-format msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" -msgstr "" +msgstr "ID %(image_id)s ã¨ã—㦠%(vdi_uuids)s ã®ã‚¢ãƒƒãƒ—ãƒãƒ¼ãƒ‰ã®ç‚ºã« xapi ã‚’å•ã„åˆã‚ã›ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/vm_utils.py:327 #, python-format msgid "Size for image %(image)s:%(virtual_size)d" -msgstr "" +msgstr "イメージ %(image)s ã®ã‚µã‚¤ã‚º:%(virtual_size)d" #: ../nova/virt/xenapi/vm_utils.py:332 #, python-format msgid "Glance image %s" -msgstr "" +msgstr "Glance イメージ %s" #. we need to invoke a plugin for copying VDI's #. content into proper path #: ../nova/virt/xenapi/vm_utils.py:342 #, python-format msgid "Copying VDI %s to /boot/guest on dom0" -msgstr "" +msgstr "ドメイン0 上㮠/boot/guest ã« VDI %s をコピーä¸" #: ../nova/virt/xenapi/vm_utils.py:352 #, python-format msgid "Kernel/Ramdisk VDI %s destroyed" -msgstr "" +msgstr "カーãƒãƒ«/RAMディスク VDI %s ãŒå‰Šé™¤ã•れã¾ã—ãŸ" #: ../nova/virt/xenapi/vm_utils.py:361 #, python-format msgid "Asking xapi to fetch %(url)s as %(access)s" -msgstr "" +msgstr "%(access)s ã¨ã—㦠%(url)s å–å¾—ã®ç‚ºã« xapi ã‚’å•ã„åˆã‚ã›ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format @@ -823,12 +830,12 @@ msgstr "PV kernelã®vdi %s ã‚’å–å¾—ã—ã¾ã™ã€‚" #: ../nova/virt/xenapi/vm_utils.py:397 #, python-format msgid "PV Kernel in VDI:%s" -msgstr "" +msgstr "VDI ä¸ã® PV カーãƒãƒ«:%s" #: ../nova/virt/xenapi/vm_utils.py:405 #, python-format msgid "Running pygrub against %s" -msgstr "" +msgstr "%s ã«å¯¾ã—㦠pygrub を実行ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/vm_utils.py:411 #, python-format @@ -838,6 +845,8 @@ msgstr "Xen Kernel %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:413 msgid "No Xen kernel found. Booting HVM." msgstr "" +"No Xen kernel found. 
Booting HVM.\r\n" +"Xen 用カーãƒãƒ«ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。完全仮想化モード(HVM)ã§èµ·å‹•ã—ã¦ã„ã¾ã™ã€‚" #: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format @@ -862,7 +871,7 @@ msgstr "(VM_UTILS) xenapi ã® power_state -> |%s|" #: ../nova/virt/xenapi/vm_utils.py:525 #, python-format msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" -msgstr "" +msgstr "VHD %(vdi_uuid)s ã®è¦ªã¯ %(parent_ref)s ã§ã™" #: ../nova/virt/xenapi/vm_utils.py:542 #, python-format @@ -873,14 +882,14 @@ msgstr "SR %s ã‚’å†ã‚¹ã‚ャンã—ã¾ã™ã€‚" #, python-format msgid "" "VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." -msgstr "" +msgstr "VHD 作æˆãŒåˆ¶é™ã‚’è¶Šãˆã¾ã—ãŸ(%(counter)d > %(max_attempts)d)ã®ã§ã€ä¸æ¢ã—ã¦ã„ã¾ã™â€¦" #: ../nova/virt/xenapi/vm_utils.py:574 #, python-format msgid "" "Parent %(parent_uuid)s doesn't match original parent " "%(original_parent_uuid)s, waiting for coalesce..." -msgstr "" +msgstr "親 %(parent_uuid)s ãŒå…ƒã€…ã®è¦ª %(original_parent_uuid)s ã¨ä¸€è‡´ã—ã¾ã›ã‚“。作æˆã‚’待機ã—ã¦ã„ã¾ã™â€¦" #: ../nova/virt/xenapi/vm_utils.py:590 #, python-format @@ -890,96 +899,96 @@ msgstr "VM %s ã«VDIãŒå˜åœ¨ã—ã¾ã›ã‚“。" #: ../nova/virt/xenapi/vm_utils.py:594 #, python-format msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" -msgstr "" +msgstr "VM %(vm_ref)s 用ã«äºˆæœŸã—ãªã„æ•°ã® VDI (%(num_vdis)s) ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸ" #: ../nova/virt/xenapi/vm_utils.py:653 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format msgid "Creating VBD for VDI %s ... " -msgstr "" +msgstr "VDI %s 用㫠VBD を作æˆã—ã¦ã„ã¾ã™â€¦ " #: ../nova/virt/xenapi/vm_utils.py:655 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format msgid "Creating VBD for VDI %s done." -msgstr "" +msgstr "VDI %s 用 VBD ã®ä½œæˆãŒå®Œäº†ã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:657 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format msgid "Plugging VBD %s ... 
" -msgstr "" +msgstr "VBD %s を接続ã—ã¦ã„ã¾ã™â€¦ " #: ../nova/virt/xenapi/vm_utils.py:659 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format msgid "Plugging VBD %s done." -msgstr "" +msgstr "仮想ブãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(VBD) %s ã®æŽ¥ç¶šãŒå®Œäº†ã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:661 #, python-format msgid "VBD %(vbd)s plugged as %(orig_dev)s" -msgstr "" +msgstr "%(orig_dev)s ã¨ã—ã¦ä»®æƒ³ãƒ–ãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(VBD) %(vbd)s を接続ã—ã¾ã—ãŸ" #: ../nova/virt/xenapi/vm_utils.py:664 #, python-format msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" -msgstr "" +msgstr "仮想ブãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(VBD) %(vbd)s ã¯ä¸æ£ãªãƒ‡ãƒã‚¤ã‚¹ã«æŽ¥ç¶šã•れã¾ã—ãŸã®ã§ã€%(dev)s ã«å†ãƒžãƒƒãƒ”ングã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/vm_utils.py:668 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format msgid "Destroying VBD for VDI %s ... " -msgstr "" +msgstr "VDI %s 用ã®ä»®æƒ³ãƒ–ãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(VBD)を削除ã—ã¦ã„ã¾ã™â€¦ " #: ../nova/virt/xenapi/vm_utils.py:671 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format msgid "Destroying VBD for VDI %s done." -msgstr "" +msgstr "VDI %s 用ã®ä»®æƒ³ãƒ–ãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(VBD)ã®å‰Šé™¤ãŒå®Œäº†ã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:683 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 msgid "VBD.unplug successful first time." -msgstr "" +msgstr "VBD.unplug ã¯ï¼‘å›žç›®ã§æˆåŠŸã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:688 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 msgid "VBD.unplug rejected: retrying..." -msgstr "" +msgstr "VBD.unplug ãŒæ‹’å¦ã•れã¾ã—ãŸ: å†è©¦è¡Œã—ã¦ã„ã¾ã™â€¦" #: ../nova/virt/xenapi/vm_utils.py:692 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 msgid "VBD.unplug successful eventually." 
-msgstr "" +msgstr "VBD.unplug ã¯æœ€çµ‚çš„ã«æˆåŠŸã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vm_utils.py:695 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -msgstr "" +msgstr "VBD.unplug ä¸ã® XenAPI.Failure を無視ã—ã¦ã„ã¾ã™: %s" #: ../nova/virt/xenapi/vm_utils.py:704 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format msgid "Ignoring XenAPI.Failure %s" -msgstr "" +msgstr "XenAPI.Failure %s を無視ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/vm_utils.py:735 #, python-format msgid "" "Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." -msgstr "" +msgstr "%(dest)s ã«ãƒ‘ーティションテーブル %(primary_first)d %(primary_last)d を書ã込んã§ã„ã¾ã™â€¦" #: ../nova/virt/xenapi/vm_utils.py:747 #, python-format msgid "Writing partition table %s done." -msgstr "" +msgstr "パーティションテーブル %s ã®æ›¸ãè¾¼ã¿ãŒå®Œäº†ã—ã¾ã—ãŸã€‚" #: ../nova/tests/test_rpc.py:89 #, python-format msgid "Nested received %(queue)s, %(value)s" -msgstr "" +msgstr "Nested received %(queue)s, %(value)s" #: ../nova/tests/test_rpc.py:95 #, python-format @@ -1003,7 +1012,7 @@ msgstr "id %s ã®serviceãŒå˜åœ¨ã—ã¾ã›ã‚“。" #: ../nova/db/sqlalchemy/api.py:251 #, python-format msgid "No service for %(host)s, %(binary)s" -msgstr "" +msgstr "%(host)s, %(binary)s 用ã®ã‚µãƒ¼ãƒ“スãŒã‚りã¾ã›ã‚“" #: ../nova/db/sqlalchemy/api.py:592 msgid "No fixed ips defined" @@ -1110,6 +1119,8 @@ msgid "" "No console pool of type %(console_type)s for compute host %(compute_host)s " "on proxy host %(host)s" msgstr "" +"コンピュータホスト %(compute_host)s 用 %(console_type)s 型コンソールプールãŒãƒ—ãƒã‚シホスト %(host)s " +"上ã«ã‚りã¾ã›ã‚“" #: ../nova/db/sqlalchemy/api.py:2035 #, python-format @@ -1119,27 +1130,27 @@ msgstr "プール %(pool_id)s ã« %(instance_id)s ã®ã‚³ãƒ³ã‚½ãƒ¼ãƒ«ãŒã‚り㾠#: ../nova/db/sqlalchemy/api.py:2057 #, python-format msgid "on instance %s" -msgstr "" +msgstr "インスタンス %s 上" #: ../nova/db/sqlalchemy/api.py:2058 #, python-format msgid "No console with id 
%(console_id)s %(idesc)s" -msgstr "" +msgstr "ID %(console_id)s %(idesc)s ã‚’æŒã¤ã‚³ãƒ³ã‚½ãƒ¼ãƒ«ãŒã‚りã¾ã›ã‚“" #: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format msgid "No zone with id %(zone_id)s" -msgstr "" +msgstr "ID %(zone_id)s ã‚’æŒã¤ã‚¾ãƒ¼ãƒ³ãŒã‚りã¾ã›ã‚“" #: ../nova/virt/libvirt_conn.py:160 #, python-format msgid "Checking state of %s" -msgstr "" +msgstr "%s ã®çŠ¶æ…‹ã‚’ç¢ºèªã—ã¦ã„ã¾ã™" #: ../nova/virt/libvirt_conn.py:165 #, python-format msgid "Current state of %(name)s was %(state)s." -msgstr "" +msgstr "%(name)s ã®ç¾åœ¨ã®çŠ¶æ…‹ã¯ %(state)s ã§ã™" #: ../nova/virt/libvirt_conn.py:183 #, python-format @@ -1153,12 +1164,12 @@ msgstr "libvirtã¸ã®æŽ¥ç¶šãŒåˆ‡ã‚Œã¦ã„ã¾ã™ã€‚" #: ../nova/virt/libvirt_conn.py:258 #, python-format msgid "instance %(instance_name)s: deleting instance files %(target)s" -msgstr "" +msgstr "インスタンス %(instance_name)s: インスタンスファイル群 %(target)s を削除ã—ã¦ã„ã¾ã™" #: ../nova/virt/libvirt_conn.py:283 #, python-format msgid "Invalid device path %s" -msgstr "" +msgstr "%s ã¯ä¸æ£ãªãƒ‡ãƒã‚¤ã‚¹ãƒ‘スã§ã™" #: ../nova/virt/libvirt_conn.py:313 #, python-format @@ -1216,16 +1227,16 @@ msgstr "デãƒã‚¤ã‚¹ã§ã™ã€‚" #: ../nova/virt/libvirt_conn.py:448 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" -msgstr "" +msgstr "データ: %(data)r, ファイルパス: %(fpath)r" #: ../nova/virt/libvirt_conn.py:456 #, python-format msgid "Contents of file %(fpath)s: %(contents)r" -msgstr "" +msgstr "ファイル %(fpath)s ã®å†…容: %(contents)r" #: ../nova/virt/libvirt_conn.py:489 msgid "Unable to find an open port" -msgstr "" +msgstr "é–‹ã„ãŸãƒãƒ¼ãƒˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: ../nova/virt/libvirt_conn.py:563 #, python-format @@ -1235,12 +1246,12 @@ msgstr "インスタンス %s ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’生æˆã—ã¾ã™ã€‚" #: ../nova/virt/libvirt_conn.py:646 #, python-format msgid "instance %(inst_name)s: injecting key into image %(img_id)s" -msgstr "" +msgstr "インスタンス %(inst_name)s: イメージ %(img_id)s ã«éµæƒ…å ±ã‚’åŸ‹ã‚込んã§ã„ã¾ã™" #: ../nova/virt/libvirt_conn.py:649 #, python-format msgid 
"instance %(inst_name)s: injecting net into image %(img_id)s" -msgstr "" +msgstr "インスタンス %(inst_name)s: イメージ %(img_id)s ã«ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯æƒ…å ±ã‚’åŸ‹ã‚込んã§ã„ã¾ã™" #. This could be a windows image, or a vmdk format disk #: ../nova/virt/libvirt_conn.py:657 @@ -1248,7 +1259,7 @@ msgstr "" msgid "" "instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " "(%(e)s)" -msgstr "" +msgstr "インスタンス %(inst_name)s: イメージ %(img_id)s ã¸ã®ãƒ‡ãƒ¼ã‚¿åŸ‹ã‚è¾¼ã¿ã®ã‚¨ãƒ©ãƒ¼ã‚’無視ã—ã¦ã„ã¾ã™ (%(e)s)" #. TODO(termie): cache? #: ../nova/virt/libvirt_conn.py:665 @@ -1263,12 +1274,12 @@ msgstr "インスタンス %s: toXML メソッドを完了。" #: ../nova/virt/libvirt_conn.py:751 msgid "diagnostics are not supported for libvirt" -msgstr "" +msgstr "libvirt ã§ã¯è¨ºæ–(diagnostics)ãŒã‚µãƒãƒ¼ãƒˆã•れã¦ã„ã¾ã›ã‚“" #: ../nova/virt/libvirt_conn.py:1225 #, python-format msgid "Attempted to unfilter instance %s which is not filtered" -msgstr "" +msgstr "フィルタã•れã¦ã„ãªã„インスタンス %s ã®ãƒ•ィルタ解除を試行ã—ã¾ã—ãŸ" #: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format @@ -1296,7 +1307,7 @@ msgstr "ターゲット %s ã‚’ã‚¢ãƒã‚±ãƒ¼ãƒˆã—ã¾ã—ãŸã€‚" #: ../nova/virt/images.py:70 #, python-format msgid "Finished retreving %(url)s -- placed in %(path)s" -msgstr "" +msgstr "%(url)s ã®å–得を完了ã—ã¾ã—㟠-- %(path)s ã«é…ç½®ã•れã¾ã—ãŸ" #: ../nova/scheduler/driver.py:66 msgid "Must implement a fallback schedule" @@ -1304,21 +1315,21 @@ msgstr "予備ã®(fallback)スケジューラを実装ã™ã‚‹å¿…è¦ãŒã‚りã¾ã #: ../nova/console/manager.py:70 msgid "Adding console" -msgstr "" +msgstr "ã‚³ãƒ³ã‚½ãƒ¼ãƒ«ã‚’è¿½åŠ ã—ã¦ã„ã¾ã™" #: ../nova/console/manager.py:90 #, python-format msgid "Tried to remove non-existant console %(console_id)s." -msgstr "" +msgstr "å˜åœ¨ã—ãªã„コンソール %(console_id)s を削除ã—よã†ã¨ã—ã¾ã—ãŸ" #: ../nova/api/direct.py:149 msgid "not available" -msgstr "" +msgstr "利用ã§ãã¾ã›ã‚“" #: ../nova/api/ec2/cloud.py:62 #, python-format msgid "The key_pair %s already exists" -msgstr "" +msgstr "ã‚ーペア %s ã¯æ—¢ã«å˜åœ¨ã—ã¾ã™" #. 
TODO(vish): Do this with M2Crypto instead #: ../nova/api/ec2/cloud.py:118 @@ -1352,7 +1363,7 @@ msgstr "Revoke security group ingress: ã‚»ã‚ãƒ¥ãƒªãƒ†ã‚£ã‚°ãƒ«ãƒ¼ãƒ—è¨±å¯ %s ã #: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 msgid "Not enough parameters to build a valid rule." -msgstr "" +msgstr "有効ãªãƒ«ãƒ¼ãƒ«ã‚’作æˆã™ã‚‹ç‚ºã®å分ãªãƒ‘ラメータãŒã‚りã¾ã›ã‚“" #: ../nova/api/ec2/cloud.py:443 msgid "No rule for the specified parameters." @@ -1391,7 +1402,7 @@ msgstr "Create volume: %s GBã®ãƒœãƒªãƒ¥ãƒ¼ãƒ を作æˆã—ã¾ã™ã€‚" #: ../nova/api/ec2/cloud.py:612 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -msgstr "" +msgstr "ボリューム%(volume_id)s をインスタンス %(instance_id)s ã®ãƒ‡ãƒã‚¤ã‚¹ %(device)s ã«æŽ¥ç¶š" #: ../nova/api/ec2/cloud.py:629 #, python-format @@ -1410,7 +1421,7 @@ msgstr "Release address: アドレス %s を開放ã—ã¾ã™ã€‚" #: ../nova/api/ec2/cloud.py:771 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" -msgstr "" +msgstr "インスタンス %(instance_id)s ã«ã‚¢ãƒ‰ãƒ¬ã‚¹ %(public_ip)s を割り当ã¦" #: ../nova/api/ec2/cloud.py:780 #, python-format @@ -1434,7 +1445,7 @@ msgstr "De-registering image: イメージ %s を登録解除ã—ã¾ã™ã€‚" #: ../nova/api/ec2/cloud.py:875 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" -msgstr "" +msgstr "イメージ %(image_location)s ㌠ID %(image_id)s ã§ç™»éŒ²ã•れã¾ã—ãŸ" #: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format @@ -1466,12 +1477,12 @@ msgstr "イメージ %s ã®å…¬é–‹è¨å®šã‚’æ›´æ–°ã—ã¾ã™ã€‚" #: ../bin/nova-api.py:52 #, python-format msgid "Using paste.deploy config at: %s" -msgstr "" +msgstr "%s ã«ã‚ã‚‹ paste.deploy è¨å®šã‚’使用ã—ã¦ã„ã¾ã™" #: ../bin/nova-api.py:57 #, python-format msgid "No paste configuration for app: %s" -msgstr "" +msgstr "アプリケーション用 paste è¨å®šãŒã‚りã¾ã›ã‚“: %s" #: ../bin/nova-api.py:59 #, python-format @@ -1479,60 +1490,63 @@ msgid "" "App Config: %(api)s\n" "%(config)r" msgstr "" +"アプリケーションè¨å®š %(api)s\n" +"%(config)r" #: 
../bin/nova-api.py:64 #, python-format msgid "Running %s API" -msgstr "" +msgstr "%s API を実行ã—ã¦ã„ã¾ã™" #: ../bin/nova-api.py:69 #, python-format msgid "No known API applications configured in %s." -msgstr "" +msgstr "%s ä¸ã«æ—¢çŸ¥ã® API アプリケーションè¨å®šãŒã‚りã¾ã›ã‚“。" #: ../bin/nova-api.py:83 #, python-format msgid "Starting nova-api node (version %s)" -msgstr "" +msgstr "nova-api ノードを起動ã—ã¦ã„ã¾ã™ (ãƒãƒ¼ã‚¸ãƒ§ãƒ³ %s)" #: ../bin/nova-api.py:89 #, python-format msgid "No paste configuration found for: %s" -msgstr "" +msgstr "%s 用㮠paste è¨å®šãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format msgid "Argument %(key)s value %(value)s is too short." -msgstr "" +msgstr "引数 %(key)s ã®å€¤ %(value)s ãŒçŸã™ãŽã¾ã™ã€‚" #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format msgid "Argument %(key)s value %(value)s contains invalid characters." -msgstr "" +msgstr "引数 %(key)s ã®å€¤ %(value)s ãŒä¸æ£ãªæ–‡å—ã‚’å«ã‚“ã§ã„ã¾ã™ã€‚" #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format msgid "Argument %(key)s value %(value)s starts with a hyphen." -msgstr "" +msgstr "引数 %(key)s ã®å€¤ %(value)s ãŒãƒã‚¤ãƒ•ン(-)ã‹ã‚‰å§‹ã¾ã£ã¦ã„ã¾ã™ã€‚" #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format msgid "Argument %s is required." -msgstr "" +msgstr "引数 %s ãŒå¿…è¦ã§ã™ã€‚" #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format msgid "" "Argument %(key)s may not take value %(value)s. Valid values are ['true', " "'false']." -msgstr "" +msgstr "引数 %(key)s ã¯å€¤ %(value)s ãŒè¨±å¯ã•れã¦ã„ã¾ã›ã‚“。有効ãªå€¤ã¯ ['true', 'false'] ã§ã™ã€‚" #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format msgid "" "Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." 
msgstr "" +"%(sr_ref)s 上㫠VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) を作æˆã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vmops.py:67 #, python-format @@ -1542,7 +1556,7 @@ msgstr "ユニークã§ã¯ãªã„name %s を作æˆã—よã†ã¨ã—ã¾ã—ãŸã€‚" #: ../nova/virt/xenapi/vmops.py:73 #, python-format msgid "instance %(name)s: not enough free memory" -msgstr "" +msgstr "インスタンス %(name)s: å分ãªç©ºãメモリãŒã‚りã¾ã›ã‚“" #: ../nova/virt/xenapi/vmops.py:148 #, python-format @@ -1552,17 +1566,17 @@ msgstr "VM %s ã‚’é–‹å§‹ã—ã¾ã™â€¦" #: ../nova/virt/xenapi/vmops.py:151 #, python-format msgid "Spawning VM %(instance_name)s created %(vm_ref)s." -msgstr "" +msgstr "%(vm_ref)s ã‹ã‚‰ä½œæˆã•れ㟠VM %(instance_name)s ã®å®Ÿè¡ŒãŒç¹°ã‚Šã‹ãˆã•れã¦ã„ã¾ã™ã€‚" #: ../nova/virt/xenapi/vmops.py:162 #, python-format msgid "Invalid value for onset_files: '%s'" -msgstr "" +msgstr "onset_files 用ã®ä¸æ£ãªå€¤: '%s'" #: ../nova/virt/xenapi/vmops.py:167 #, python-format msgid "Injecting file path: '%s'" -msgstr "" +msgstr "ファイルパス '%s' を埋ã‚込んã§ã„ã¾ã™" #: ../nova/virt/xenapi/vmops.py:180 #, python-format @@ -1584,7 +1598,7 @@ msgstr "VM %s ã«å¯¾ã™ã‚‹ã‚¹ãƒŠãƒƒãƒ—ショットを開始ã—ã¾ã™ã€‚" #: ../nova/virt/xenapi/vmops.py:269 #, python-format msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" -msgstr "" +msgstr "%(vm_ref)s ã®ã‚¹ãƒŠãƒƒãƒ—ショットãŒä½œæˆå‡ºæ¥ã¾ã›ã‚“: %(exc)s" #: ../nova/virt/xenapi/vmops.py:280 #, python-format @@ -1594,15 +1608,15 @@ msgstr "VM %s ã®ã‚¹ãƒŠãƒƒãƒ—ショットã¨ã‚¢ãƒƒãƒ—ãƒãƒ¼ãƒ‰ãŒå®Œäº†ã—ã¾ã— #: ../nova/virt/xenapi/vmops.py:356 #, python-format msgid "VM %(vm)s already halted, skipping shutdown..." 
-msgstr "" +msgstr "VM %(vm)s ã¯æ—¢ã«åœæ¢ã—ã¦ã„ã¾ã™ã®ã§ã€ã‚·ãƒ£ãƒƒãƒˆãƒ€ã‚¦ãƒ³ã‚’çœç•¥ã—ã¾ã™â€¦" #: ../nova/virt/xenapi/vmops.py:389 msgid "Removing kernel/ramdisk files" -msgstr "" +msgstr "カーãƒãƒ«/RAMディスクファイルを削除ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/vmops.py:399 msgid "kernel/ramdisk files removed" -msgstr "" +msgstr "カーãƒãƒ«/RAMディスクファイルãŒå‰Šé™¤ã•れã¾ã—ãŸ" #: ../nova/virt/xenapi/vmops.py:561 #, python-format @@ -1610,6 +1624,7 @@ msgid "" "TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " "args=%(strargs)s" msgstr "" +"タイムアウト: %(method)s ã®å‘¼ã³å‡ºã—ãŒã‚¿ã‚¤ãƒ アウトã—ã¾ã—ãŸã€‚VM id=%(instance_id)s; 引数=%(strargs)s" #: ../nova/virt/xenapi/vmops.py:564 #, python-format @@ -1617,6 +1632,7 @@ msgid "" "NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " "id=%(instance_id)s; args=%(strargs)s" msgstr "" +"未実装: %(method)s ã®å‘¼ã³å‡ºã—ã¯ã‚¨ãƒ¼ã‚¸ã‚§ãƒ³ãƒˆã«å®Ÿè£…ã•れã¦ã„ã¾ã›ã‚“。VM id=%(instance_id)s; 引数=%(strargs)s" #: ../nova/virt/xenapi/vmops.py:569 #, python-format @@ -1624,11 +1640,12 @@ msgid "" "The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " "args=%(strargs)s" msgstr "" +"%(method)s 呼ã³å‡ºã—ãŒã‚¨ãƒ©ãƒ¼ã‚’è¿”ã—ã¾ã—ãŸ: %(e)s. VM id=%(instance_id)s; 引数=%(strargs)s" #: ../nova/virt/xenapi/vmops.py:760 #, python-format msgid "OpenSSL error: %s" -msgstr "" +msgstr "OpenSSL エラー: %s" #: ../nova/tests/test_compute.py:148 #, python-format @@ -1659,7 +1676,7 @@ msgstr "%s 用ã®VPNã‚’èµ·å‹•ã—ã¾ã™ã€‚" #: ../nova/db/sqlalchemy/migration.py:35 msgid "python-migrate is not installed. Exiting." -msgstr "" +msgstr "python-migrate ãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れã¦ã„ã¾ã›ã‚“。終了ã—ã¾ã™ã€‚" #: ../nova/image/s3.py:99 #, python-format @@ -1676,6 +1693,7 @@ msgid "" "Access key %(access_key)s has had %(failures)d failed authentications and " "will be locked out for %(lock_mins)d minutes." 
msgstr "" +"アクセスã‚ー %(access_key)s 㯠%(failures)d 回èªè¨¼ã«å¤±æ•—ã—ã¾ã—ãŸã®ã§ã€%(lock_mins)d 分間ãƒãƒƒã‚¯ã—ã¾ã™ã€‚" #: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format @@ -1685,7 +1703,7 @@ msgstr "%s ã®èªè¨¼ã«å¤±æ•—ã—ã¾ã—ãŸã€‚" #: ../nova/api/ec2/__init__.py:182 #, python-format msgid "Authenticated Request For %(uname)s:%(pname)s)" -msgstr "" +msgstr "%(uname)s 用ã®èªè¨¼ãƒªã‚¯ã‚¨ã‚¹ãƒˆ:%(pname)s)" #: ../nova/api/ec2/__init__.py:207 #, python-format @@ -1695,23 +1713,23 @@ msgstr "アクション(action): %s" #: ../nova/api/ec2/__init__.py:209 #, python-format msgid "arg: %(key)s\t\tval: %(value)s" -msgstr "" +msgstr "引数: %(key)s\t\t値: %(value)s" #: ../nova/api/ec2/__init__.py:281 #, python-format msgid "" "Unauthorized request for controller=%(controller)s and action=%(action)s" -msgstr "" +msgstr "コントãƒãƒ¼ãƒ©=%(controller)s ã¨ã‚¢ã‚¯ã‚·ãƒ§ãƒ³=%(action)s 用ã®è¨±å¯ã•れã¦ã„ãªã„リクエスト" #: ../nova/api/ec2/__init__.py:314 #, python-format msgid "InstanceNotFound raised: %s" -msgstr "" +msgstr "InstanceNotFound ãŒç™ºè¡Œã•れã¾ã—ãŸ: %s" #: ../nova/api/ec2/__init__.py:320 #, python-format msgid "VolumeNotFound raised: %s" -msgstr "" +msgstr "VolumeNotFound ãŒç™ºè¡Œã•れã¾ã—ãŸ: %s" #: ../nova/api/ec2/__init__.py:326 #, python-format @@ -1778,12 +1796,12 @@ msgstr "" #: ../nova/virt/xenapi_conn.py:311 #, python-format msgid "Task [%(name)s] %(task)s status: success %(result)s" -msgstr "" +msgstr "タスク [%(name)s] %(task)s 状態: æˆåŠŸ %(result)s" #: ../nova/virt/xenapi_conn.py:317 #, python-format msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" -msgstr "" +msgstr "タスク [%(name)s] %(task)s 状態: %(status)s %(error_info)s" #: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format @@ -1802,12 +1820,12 @@ msgstr "æ›´æ–°ã®æœ€ä¸ã«äºˆæœŸã—ãªã„エラーãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" #: ../nova/compute/monitor.py:356 #, python-format msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" +msgstr "\"%(iid)s\" 上㮠\"%(disk)s\" 
用ã®ãƒ–ãƒãƒƒã‚¯çµ±è¨ˆ(blockstats)ãŒå–å¾—ã§ãã¾ã›ã‚“" #: ../nova/compute/monitor.py:379 #, python-format msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" +msgstr "\"%(iid)s\" 上㮠%(interface)s\" 用インターフェース統計(ifstats)ãŒå–å¾—ã§ãã¾ã›ã‚“" #: ../nova/compute/monitor.py:414 msgid "unexpected exception getting connection" @@ -1821,13 +1839,13 @@ msgstr "インスタンス %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚" #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" -msgstr "" +msgstr "ボリューム%s 用㮠iSCSI エクスãƒãƒ¼ãƒˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: ../nova/api/ec2/apirequest.py:100 #, python-format msgid "" "Unsupported API request: controller = %(controller)s, action = %(action)s" -msgstr "" +msgstr "未サãƒãƒ¼ãƒˆã® API リクエスト: コントãƒãƒ¼ãƒ© = %(controller)s, アクション = %(action)s" #: ../nova/api/openstack/__init__.py:55 #, python-format @@ -1840,33 +1858,33 @@ msgstr "管ç†ç”¨ã‚ªãƒšãƒ¬ãƒ¼ã‚·ãƒ§ãƒ³ï¼ˆadmin operation)ã‚’APIã«ç™»éŒ²ã—ã¾ã #: ../nova/console/xvp.py:99 msgid "Rebuilding xvp conf" -msgstr "" +msgstr "xvp è¨å®šã‚’冿§‹ç¯‰ã—ã¦ã„ã¾ã™" #: ../nova/console/xvp.py:116 #, python-format msgid "Re-wrote %s" -msgstr "" +msgstr "%s ã‚’å†åº¦æ›¸ãè¾¼ã¿ã¾ã—ãŸ" #: ../nova/console/xvp.py:121 msgid "Stopping xvp" -msgstr "" +msgstr "xvp ã‚’åœæ¢ã—ã¦ã„ã¾ã™" #: ../nova/console/xvp.py:134 msgid "Starting xvp" -msgstr "" +msgstr "xvp ã‚’é–‹å§‹ã—ã¦ã„ã¾ã™" #: ../nova/console/xvp.py:141 #, python-format msgid "Error starting xvp: %s" -msgstr "" +msgstr "xvp ã®é–‹å§‹ä¸ã«ã‚¨ãƒ©ãƒ¼: %s" #: ../nova/console/xvp.py:144 msgid "Restarting xvp" -msgstr "" +msgstr "xvp ã‚’å†èµ·å‹•ã—ã¦ã„ã¾ã™" #: ../nova/console/xvp.py:146 msgid "xvp not running..." -msgstr "" +msgstr "xvp ãŒå®Ÿè¡Œã•れã¦ã„ã¾ã›ã‚“…" #: ../bin/nova-manage.py:272 msgid "" @@ -1874,6 +1892,8 @@ msgid "" "Please create a database using nova-manage sync db before running this " "command." 
msgstr "" +"上記ã®ã‚¨ãƒ©ãƒ¼ã¯ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒä½œæˆã•れã¦ã„ãªã„為ã«è¡¨ç¤ºã•れãŸã®ã‹ã‚‚知れã¾ã›ã‚“。\n" +"ã“ã®ã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã™ã‚‹å‰ã«ã€nova-manage db sync を使用ã—ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã‚’作æˆã—ã¦ä¸‹ã•ã„。" #: ../bin/nova-manage.py:426 msgid "" @@ -1883,40 +1903,47 @@ msgid "" " nova-manage network create 10.0.0.0/8 10 64\n" "\n" msgstr "" +"ã“れ以上利用å¯èƒ½ãªãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒã‚りã¾ã›ã‚“。ã“ã‚ŒãŒæ–°è¦ã®ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã§ã‚れã°ã€\n" +"下記ã®ã‚ˆã†ãªã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" #: ../bin/nova-manage.py:431 msgid "" "The above error may show that the certificate db has not been created.\n" "Please create a database by running a nova-api server on this host." msgstr "" +"上記ã®ã‚¨ãƒ©ãƒ¼ã¯èªè¨¼ DB ãŒä½œæˆã•れã¦ã„ãªã„為ã«è¡¨ç¤ºã•れるã®ã‹ã‚‚知れã¾ã›ã‚“。\n" +"ã“ã®ãƒ›ã‚¹ãƒˆä¸Šã§ nova-api を実行ã—ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã‚’作æˆã—ã¦ä¸‹ã•ã„。" #: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 msgid "network" -msgstr "" +msgstr "ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯" #: ../bin/nova-manage.py:448 msgid "IP address" -msgstr "" +msgstr "IPアドレス" #: ../bin/nova-manage.py:449 msgid "MAC address" -msgstr "" +msgstr "MAC アドレス" #: ../bin/nova-manage.py:450 msgid "hostname" -msgstr "" +msgstr "ホストå" #: ../bin/nova-manage.py:451 msgid "host" -msgstr "" +msgstr "ホスト" #: ../bin/nova-manage.py:537 msgid "netmask" -msgstr "" +msgstr "ãƒãƒƒãƒˆãƒžã‚¹ã‚¯" #: ../bin/nova-manage.py:538 msgid "start address" -msgstr "" +msgstr "開始アドレス" #: ../nova/virt/disk.py:69 #, python-format @@ -1931,7 +1958,7 @@ msgstr "ファイルシステム%s ã®ãƒžã‚¦ãƒ³ãƒˆã«å¤±æ•—ã—ã¾ã—ãŸã€‚" #: ../nova/virt/disk.py:124 #, python-format msgid "nbd device %s did not show up" -msgstr "" +msgstr "nbd デãƒã‚¤ã‚¹ %s ãŒå‡ºç¾ã—ã¾ã›ã‚“" #: ../nova/virt/disk.py:128 #, python-format @@ -1940,12 +1967,12 @@ msgstr "イメージをループãƒãƒƒã‚¯ %s ã«ã‚¢ã‚¿ãƒƒãƒã§ãã¾ã›ã‚“。" #: ../nova/virt/disk.py:151 msgid "No free nbd devices" -msgstr "" +msgstr "空ãã® nbd デãƒã‚¤ã‚¹ãŒã‚りã¾ã›ã‚“" #: ../doc/ext/nova_todo.py:46 #, python-format msgid 
"%(filename)s, line %(line_info)d" -msgstr "" +msgstr "%(filename)s, %(line_info)d 行目" #. FIXME(chiradeep): implement this #: ../nova/virt/hyperv.py:118 @@ -1990,7 +2017,7 @@ msgstr "vm %s ã®vcpus ã‚’è¨å®šã—ã¾ã™ã€‚" #: ../nova/virt/hyperv.py:202 #, python-format msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" -msgstr "" +msgstr "ディスクファイル %(vhdfile)s 接続ã«ã‚ˆã‚Šã€%(vm_name)s 用ディスクを作æˆã—ã¦ã„ã¾ã™" #: ../nova/virt/hyperv.py:227 #, python-format @@ -2029,7 +2056,7 @@ msgstr "ãƒãƒ¼ãƒˆ %s ã®ä½œæˆã«å¤±æ•—ã—ã¾ã—ãŸã€‚" #: ../nova/virt/hyperv.py:276 #, python-format msgid "Created switch port %(vm_name)s on switch %(ext_path)s" -msgstr "" +msgstr "スイッム%(ext_path)s 上ã®ã‚¹ã‚¤ãƒƒãƒãƒãƒ¼ãƒˆ %(vm_name)s ãŒä½œæˆã•れã¾ã—ãŸ" #: ../nova/virt/hyperv.py:286 #, python-format @@ -2049,7 +2076,7 @@ msgstr "WMIジョブã«å¤±æ•—ã—ã¾ã—ãŸ: %s" #: ../nova/virt/hyperv.py:325 #, python-format msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " -msgstr "" +msgstr "WMI ジョブæˆåŠŸ: %(desc)s, çµŒéŽæ™‚é–“=%(elap)s " #: ../nova/virt/hyperv.py:361 #, python-format @@ -2064,7 +2091,7 @@ msgstr "vm %s ã®å‰Šé™¤ã«å¤±æ•—ã—ã¾ã—ãŸã€‚" #: ../nova/virt/hyperv.py:393 #, python-format msgid "Del: disk %(vhdfile)s vm %(instance_name)s" -msgstr "" +msgstr "削除: ディスク %(vhdfile)s VM %(instance_name)s" #: ../nova/virt/hyperv.py:415 #, python-format @@ -2072,16 +2099,20 @@ msgid "" "Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " "num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" +"VM %(instance_id)s ç”¨æƒ…å ±å–å¾—: 状態=%(state)s, メモリ=%(memusage)s, CPUæ•°=%(numprocs)s, " +"CPU時間=%(uptime)s" #: ../nova/virt/hyperv.py:451 #, python-format msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" -msgstr "" +msgstr "VM ã®çŠ¶æ…‹ãŒ %(vm_name)s ã‹ã‚‰ %(req_state)s ã«ç„¡äº‹å¤‰æ›´ã•れã¾ã—ãŸ" #: ../nova/virt/hyperv.py:454 #, python-format msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" +"Failed to change vm state of \r\n" +"%(vm_name)s ã‹ã‚‰ 
%(req_state)s ã¸ã®VM ã®çŠ¶æ…‹å¤‰æ›´ã«å¤±æ•—ã—ã¾ã—ãŸ" #: ../nova/compute/api.py:71 #, python-format @@ -2096,7 +2127,7 @@ msgstr "インスタンス %d ã«ãƒ›ã‚¹ãƒˆãŒç™»éŒ²ã•れã¦ã„ã¾ã›ã‚“。" #: ../nova/compute/api.py:97 #, python-format msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" -msgstr "" +msgstr "%(pid)s 用ã®åˆ¶é™(Quota)è¶…éŽã€%(min_count)s 個ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã‚’実行ã—よã†ã¨ã—ã¾ã—ãŸ" #: ../nova/compute/api.py:99 #, python-format @@ -2117,11 +2148,14 @@ msgstr "%s 個ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã®èµ·å‹•ã‚’å§‹ã‚ã¾ã™â€¦" #, python-format msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" +"Casting to scheduler for \r\n" +"\r\n" +"%(pid)s/%(uid)s ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ %(instance_id)s 用ã®ã‚¹ã‚±ã‚¸ãƒ¥ãƒ¼ãƒ©ã‚’割り当ã¦" #: ../nova/compute/api.py:292 #, python-format msgid "Going to try to terminate %s" -msgstr "" +msgstr "%s ã‚’åœæ¢ã—よã†ã¨ã—ã¦ã„ã¾ã™" #: ../nova/compute/api.py:296 #, python-format @@ -2148,6 +2182,8 @@ msgid "" "AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " "%(fl_intv)d seconds." msgstr "" +"%(fl_host)s 上㮠AMQP サーãƒ:%(fl_port)d ãŒåˆ°é”ä¸èƒ½(unreachable)ã§ã™ã€‚%(fl_intv)d " +"秒後ã«å†è©¦è¡Œã—ã¾ã™ã€‚" #: ../nova/rpc.py:103 #, python-format @@ -2207,7 +2243,7 @@ msgstr "MSG_ID㯠%s ã§ã™ã€‚" #: ../nova/rpc.py:354 msgid "Making asynchronous cast..." -msgstr "" +msgstr "éžåŒæœŸã®å‰²ã‚Šå½“ã¦ã‚’作æˆã—ã¦ã„ã¾ã™â€¦" #: ../nova/rpc.py:364 #, python-format @@ -2241,11 +2277,11 @@ msgstr "å½ã®AOE: %s" #: ../nova/volume/driver.py:233 msgid "Skipping ensure_export. No iscsi_target " -msgstr "" +msgstr "ensure_export ã‚’çœç•¥ã—ã¦ã„ã¾ã™ã€‚iscsi_target ãŒã‚りã¾ã›ã‚“。 " #: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 msgid "Skipping remove_export. 
No iscsi_target " -msgstr "" +msgstr "remove_export ã‚’çœç•¥ã—ã¦ã„ã¾ã™ã€‚iscsi_target ãŒã‚りã¾ã›ã‚“ " #: ../nova/volume/driver.py:347 #, python-format @@ -2255,41 +2291,41 @@ msgstr "å½ã®ISCSI: %s" #: ../nova/volume/driver.py:359 #, python-format msgid "rbd has no pool %s" -msgstr "" +msgstr "rbd ã«ãƒ—ール %s ãŒã‚りã¾ã›ã‚“。" #: ../nova/volume/driver.py:414 #, python-format msgid "Sheepdog is not working: %s" -msgstr "" +msgstr "Sheepdog ãŒå‹•作ã—ã¦ã„ã¾ã›ã‚“: %s" #: ../nova/volume/driver.py:416 msgid "Sheepdog is not working" -msgstr "" +msgstr "Sheepdog ãŒæ©Ÿèƒ½ã—ã¦ã„ã¾ã›ã‚“" #: ../nova/wsgi.py:68 #, python-format msgid "Starting %(arg0)s on %(host)s:%(port)s" -msgstr "" +msgstr "%(host)s:%(port)s 上㧠%(arg0)s ã‚’é–‹å§‹ã—ã¦ã„ã¾ã™" #: ../nova/wsgi.py:147 msgid "You must implement __call__" -msgstr "" +msgstr "__call__ を実装ã—ãªã‘れã°ãªã‚Šã¾ã›ã‚“" #: ../bin/nova-instancemonitor.py:55 msgid "Starting instance monitor" -msgstr "" +msgstr "インスタンスモニタを開始ã—ã¦ã„ã¾ã™" #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" -msgstr "" +msgstr "IP アドレスをリースã—ã¾ã—ãŸ" #: ../bin/nova-dhcpbridge.py:73 msgid "Adopted old lease or got a change of mac/hostname" -msgstr "" +msgstr "以å‰ã®ãƒªãƒ¼ã‚¹ã‚’採用ã—ãŸã‹ã€MAC/ホストåã®å¤‰æ›´ã‚’å–å¾—ã—ã¾ã—ãŸ" #: ../bin/nova-dhcpbridge.py:80 msgid "releasing ip" -msgstr "" +msgstr "IP アドレスを開放ã—ã¦ã„ã¾ã™" #: ../bin/nova-dhcpbridge.py:123 #, python-format @@ -2297,6 +2333,8 @@ msgid "" "Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " "on interface %(interface)s" msgstr "" +"インターフェース %(interface)s 上㫠IP アドレス %(ip)sã€ãƒ›ã‚¹ãƒˆå %(hostname)s ã‚’æŒã£ãŸ MAC アドレス " +"%(mac)s 用㮠%(action)s を呼ã³å‡ºã—ã¾ã—ãŸ" #: ../nova/virt/fake.py:239 #, python-format @@ -2325,7 +2363,7 @@ msgstr "IP %s ãŒãƒªãƒ¼ã‚¹ã•れã¾ã—ãŸãŒé–¢é€£ä»˜ã‘られã¦ã„ã¾ã›ã‚“。 #: ../nova/network/manager.py:220 #, python-format msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" -msgstr "" +msgstr "IP アドレス %(address)s ãŒä¸æ£ãª MAC アドレス %(inst_addr)s 対 %(mac)s 
ã«å‰²ã‚Šå½“ã¦ã‚‰ã‚Œã¾ã—ãŸ" #: ../nova/network/manager.py:228 #, python-format @@ -2335,7 +2373,7 @@ msgstr "æ—¢ã«å‰²å½“解除ã—ã¦ã„ã‚‹IP %s ãŒãƒªãƒ¼ã‚¹ã•れã¾ã—ãŸã€‚" #: ../nova/network/manager.py:233 #, python-format msgid "Releasing IP %s" -msgstr "" +msgstr "IP アドレス %s を開放ã—ã¦ã„ã¾ã™" #: ../nova/network/manager.py:237 #, python-format @@ -2345,7 +2383,7 @@ msgstr "割り当ã¦ã¦ã„ãªã„IP %s ãŒé–‹æ”¾ã•れã¾ã—ãŸã€‚" #: ../nova/network/manager.py:241 #, python-format msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" -msgstr "" +msgstr "䏿£ãª MAC アドレス %(inst_addr)s 対 %(mac)s ã‹ã‚‰ IP アドレス %(address)s ãŒé–‹æ”¾ã•れã¾ã—ãŸ" #: ../nova/network/manager.py:244 #, python-format @@ -2356,7 +2394,7 @@ msgstr "リースã—ã¦ã„ãªã„IP %s ãŒé–‹æ”¾ã•れã¾ã—ãŸã€‚" msgid "" "The sum between the number of networks and the vlan start cannot be greater " "than 4094" -msgstr "" +msgstr "ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã®æ•°ã¨VLANã®é–‹å§‹ç•ªå·ã®å’Œã¯ 4094 より大ããã§ãã¾ã›ã‚“。" #: ../nova/virt/xenapi/volume_utils.py:57 #, python-format @@ -2366,7 +2404,7 @@ msgstr "%s ã‚’ introduce ã—ã¾ã™â€¦" #: ../nova/virt/xenapi/volume_utils.py:74 #, python-format msgid "Introduced %(label)s as %(sr_ref)s." 
-msgstr "" +msgstr "%(sr_ref)s ã¨ã—㦠%(label)s ã‚’å°Žå…¥ã—ã¾ã—ãŸ" #: ../nova/virt/xenapi/volume_utils.py:78 msgid "Unable to create Storage Repository" @@ -2385,12 +2423,12 @@ msgstr "SR %s ã‚’forgetã—ã¾ã™ã€‚ " #: ../nova/virt/xenapi/volume_utils.py:101 #, python-format msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" -msgstr "" +msgstr "%(sr_ref)s 用ã®ç‰©ç†ãƒ–ãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(PBD)å–得時ã«ä¾‹å¤– %(exc)s を無視ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/volume_utils.py:107 #, python-format msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" -msgstr "" +msgstr "物ç†ãƒ–ãƒãƒƒã‚¯ãƒ‡ãƒã‚¤ã‚¹(PBD) %(pbd)s ã®å–ã‚Šå¤–ã—æ™‚ã«ä¾‹å¤– %(exc)s を無視ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/volume_utils.py:111 #, python-format @@ -2400,7 +2438,7 @@ msgstr "SR %s ã®forgetãŒå®Œäº†ã€‚" #: ../nova/virt/xenapi/volume_utils.py:113 #, python-format msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" -msgstr "" +msgstr "SR %(sr_ref)s ã®ç™»éŒ²å‰Šé™¤æ™‚ã«ä¾‹å¤– %(exc)s を無視ã—ã¦ã„ã¾ã™" #: ../nova/virt/xenapi/volume_utils.py:123 #, python-format @@ -2420,7 +2458,7 @@ msgstr "SR %s ã®VDIã‚’introduceã§ãã¾ã›ã‚“。" #: ../nova/virt/xenapi/volume_utils.py:175 #, python-format msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" -msgstr "" +msgstr "ã‚¿ãƒ¼ã‚²ãƒƒãƒˆæƒ…å ± %(device_path)s, %(mountpoint)s ã‚’å–å¾—ã§ãã¾ã›ã‚“" #: ../nova/virt/xenapi/volume_utils.py:197 #, python-format @@ -2430,17 +2468,17 @@ msgstr "マウントãƒã‚¤ãƒ³ãƒˆã‚’変æ›ã§ãã¾ã›ã‚“。 %s" #: ../nova/objectstore/image.py:262 #, python-format msgid "Failed to decrypt private key: %s" -msgstr "" +msgstr "プライベートã‚ーã®å¾©å·ã«å¤±æ•—ã—ã¾ã—ãŸ: %s" #: ../nova/objectstore/image.py:269 #, python-format msgid "Failed to decrypt initialization vector: %s" -msgstr "" +msgstr "åˆæœŸåŒ–ベクタã®å¾©å·ã«å¤±æ•—ã—ã¾ã—ãŸ: %s" #: ../nova/objectstore/image.py:277 #, python-format msgid "Failed to decrypt image file %(image_file)s: %(err)s" -msgstr "" +msgstr "イメージファイル %(image_file)s ã®å¾©å·ã«å¤±æ•—ã—ã¾ã—ãŸ: %(err)s" #: 
../nova/objectstore/handler.py:106 #, python-format @@ -2483,32 +2521,32 @@ msgstr "Unauthorized attempt to delete bucket: ãƒã‚±ãƒƒãƒˆ %s ã«å¯¾ã™ã‚‹å‰Šé™ #: ../nova/objectstore/handler.py:273 #, python-format msgid "Getting object: %(bname)s / %(nm)s" -msgstr "" +msgstr "オブジェクトをå–å¾—ã—ã¦ã„ã¾ã™: %(bname)s / %(nm)s" #: ../nova/objectstore/handler.py:276 #, python-format msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" -msgstr "" +msgstr "ãƒã‚±ãƒƒãƒˆ %(bname)s ã‹ã‚‰ã®ã‚ªãƒ–ジェクト %(nm)s å–å¾—ã¯è¨±å¯ã•れã¦ã„ã¾ã›ã‚“" #: ../nova/objectstore/handler.py:296 #, python-format msgid "Putting object: %(bname)s / %(nm)s" -msgstr "" +msgstr "オブジェクトをアップãƒãƒ¼ãƒ‰ã—ã¦ã„ã¾ã™: %(bname)s / %(nm)s" #: ../nova/objectstore/handler.py:299 #, python-format msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" -msgstr "" +msgstr "ãƒã‚±ãƒƒãƒˆ %(bname)s ã¸ã®ã‚ªãƒ–ジェクト %(nm)s ã®ã‚¢ãƒƒãƒ—ãƒãƒ¼ãƒ‰ã¯è¨±å¯ã•れã¦ã„ã¾ã›ã‚“" #: ../nova/objectstore/handler.py:318 #, python-format msgid "Deleting object: %(bname)s / %(nm)s" -msgstr "" +msgstr "オブジェクトを削除ã—ã¦ã„ã¾ã™: %(bname)s / %(nm)s" #: ../nova/objectstore/handler.py:322 #, python-format msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" -msgstr "" +msgstr "ãƒã‚±ãƒƒãƒˆ %(bname)s 上ã®ã‚ªãƒ–ジェクト %(nm)s ã®å‰Šé™¤ã¯è¨±å¯ã•れã¦ã„ã¾ã›ã‚“" #: ../nova/objectstore/handler.py:396 #, python-format @@ -2535,7 +2573,7 @@ msgstr "Not authorized to update attributes: イメージ %s ã®ã‚¢ãƒˆãƒªãƒ“ュã #: ../nova/objectstore/handler.py:431 #, python-format msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" -msgstr "" +msgstr "イメージ %(image_id)s %(newstatus)r ã®å…¬é–‹ãƒ•ラグを切り替ãˆã¦ã„ã¾ã™" #. 
other attributes imply update #: ../nova/objectstore/handler.py:436 @@ -2576,7 +2614,7 @@ msgstr "ユーザå (%s) をプãƒã‚¸ã‚§ã‚¯ãƒˆåã¨ã—ã¦ä½¿ç”¨ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:277 #, python-format msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" -msgstr "" +msgstr "許å¯ã•れã¾ã›ã‚“: %(pjid)s ã¨ã„ã†åç§°ã®ãƒ—ãƒã‚¸ã‚§ã‚¯ãƒˆã¯ã‚りã¾ã›ã‚“ (ユーザ=%(uname)s)" #: ../nova/auth/manager.py:279 #, python-format @@ -2588,12 +2626,12 @@ msgstr "プãƒã‚¸ã‚§ã‚¯ãƒˆ %s ã¯è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚" msgid "" "Failed authorization: user %(uname)s not admin and not member of project " "%(pjname)s" -msgstr "" +msgstr "許å¯ã•れã¾ã›ã‚“: ユーザ %(uname)s ã¯ç®¡ç†è€…ã§ã‚‚プãƒã‚¸ã‚§ã‚¯ãƒˆ %(pjname)s ã®ãƒ¡ãƒ³ãƒã§ã‚‚ã‚りã¾ã›ã‚“。" #: ../nova/auth/manager.py:289 #, python-format msgid "User %(uid)s is not a member of project %(pjid)s" -msgstr "" +msgstr "ユーザ %(uid)s ã¯ãƒ—ãƒã‚¸ã‚§ã‚¯ãƒˆ %(pjid)s ã®ãƒ¡ãƒ³ãƒã§ã¯ã‚りã¾ã›ã‚“。" #: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format @@ -2621,27 +2659,27 @@ msgstr "ãƒãƒ¼ãƒ« %s ã¯ã‚°ãƒãƒ¼ãƒãƒ«ã§ã®ã¿ä½¿ç”¨å¯èƒ½ã§ã™ã€‚" #: ../nova/auth/manager.py:420 #, python-format msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" -msgstr "" +msgstr "プãƒã‚¸ã‚§ã‚¯ãƒˆ %(pid)s ã®ãƒ¦ãƒ¼ã‚¶ %(uid)s ã«ãƒãƒ¼ãƒ« %(role)s を付与ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:423 #, python-format msgid "Adding sitewide role %(role)s to user %(uid)s" -msgstr "" +msgstr "サイト共通ã®ãƒãƒ¼ãƒ« %(role)s をユーザ %(uid)s ã«ä»˜ä¸Žã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:448 #, python-format msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" -msgstr "" +msgstr "プãƒã‚¸ã‚§ã‚¯ãƒˆ %(pid)s ã®ãƒ¦ãƒ¼ã‚¶ %(uid)s ã‹ã‚‰ãƒãƒ¼ãƒ« %(role)s を削除ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:451 #, python-format msgid "Removing sitewide role %(role)s from user %(uid)s" -msgstr "" +msgstr "ユーザ %(uid)s ã‹ã‚‰ã‚µã‚¤ãƒˆå…±é€šã®ãƒãƒ¼ãƒ« %(role)s を削除ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:515 #, python-format msgid "Created project %(name)s with manager %(manager_user)s" -msgstr "" +msgstr 
"プãƒã‚¸ã‚§ã‚¯ãƒˆ %(name)s を管ç†è€… %(manager_user)s ã§ä½œæˆã—ã¾ã—ãŸã€‚" #: ../nova/auth/manager.py:533 #, python-format @@ -2651,12 +2689,12 @@ msgstr "modifying project: プãƒã‚¸ã‚§ã‚¯ãƒˆ %s ã‚’æ›´æ–°ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:545 #, python-format msgid "Adding user %(uid)s to project %(pid)s" -msgstr "" +msgstr "ユーザ %(uid)s をプãƒã‚¸ã‚§ã‚¯ãƒˆ %(pid)s ã«è¿½åŠ ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:566 #, python-format msgid "Remove user %(uid)s from project %(pid)s" -msgstr "" +msgstr "ユーザ %(uid)s をプãƒã‚¸ã‚§ã‚¯ãƒˆ %(pid)s ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:592 #, python-format @@ -2666,7 +2704,7 @@ msgstr "Deleting project: プãƒã‚¸ã‚§ã‚¯ãƒˆ %s を削除ã—ã¾ã™ã€‚" #: ../nova/auth/manager.py:650 #, python-format msgid "Created user %(rvname)s (admin: %(rvadmin)r)" -msgstr "" +msgstr "ユーザ %(rvname)s を作æˆã—ã¾ã—ãŸã€‚(管ç†è€…: %(rvadmin)r)" #: ../nova/auth/manager.py:659 #, python-format @@ -2686,7 +2724,7 @@ msgstr "Secret Key change: ユーザ %s ã®ã‚·ãƒ¼ã‚¯ãƒ¬ãƒƒãƒˆã‚ーを更新ã—ã #: ../nova/auth/manager.py:673 #, python-format msgid "Admin status set to %(admin)r for user %(uid)s" -msgstr "" +msgstr "ユーザ %(uid)s ã«å¯¾ã—ã¦ç®¡ç†è€…状態㌠%(admin)r ã«è¨å®šã•れã¾ã—ãŸã€‚" #: ../nova/auth/manager.py:722 #, python-format @@ -2696,7 +2734,7 @@ msgstr "プãƒã‚¸ã‚§ã‚¯ãƒˆ %s ã«é–¢ã™ã‚‹vpnデータãŒã‚りã¾ã›ã‚“。" #: ../nova/service.py:161 #, python-format msgid "Starting %(topic)s node (version %(vcs_string)s)" -msgstr "" +msgstr "%(topic)s ノードを開始ã—ã¦ã„ã¾ã™ (ãƒãƒ¼ã‚¸ãƒ§ãƒ³ %(vcs_string)s)" #: ../nova/service.py:174 msgid "Service killed that has no database entry" @@ -2717,7 +2755,7 @@ msgstr "モデルサーãƒãŒæ¶ˆæ»…ã—ã¾ã—ãŸã€‚" #: ../nova/auth/ldapdriver.py:174 #, python-format msgid "LDAP user %s already exists" -msgstr "" +msgstr "LDAPユーザ %s ã¯ã™ã§ã«å˜åœ¨ã—ã¾ã™ã€‚" #: ../nova/auth/ldapdriver.py:205 #, python-format @@ -2727,48 +2765,48 @@ msgstr "LDAPオブジェクト %s ãŒå˜åœ¨ã—ã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:348 #, python-format msgid "User %s doesn't exist" -msgstr "" +msgstr "ユーザ %s 
ã¯å˜åœ¨ã—ã¾ã›ã‚“" #: ../nova/auth/ldapdriver.py:472 #, python-format msgid "Group can't be created because group %s already exists" -msgstr "" +msgstr "グループ %s ã¯æ—¢ã«å˜åœ¨ã™ã‚‹ãŸã‚ã€ä½œæˆã§ãã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:478 #, python-format msgid "Group can't be created because user %s doesn't exist" -msgstr "" +msgstr "ユーザ %s ã¯å˜åœ¨ã—ãªã„ãŸã‚ã€ã‚°ãƒ«ãƒ¼ãƒ—ã®ä½œæˆã¯ã§ãã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:495 #, python-format msgid "User %s can't be searched in group because the user doesn't exist" -msgstr "" +msgstr "ユーザ %s ã¯å˜åœ¨ã—ãªã„ãŸã‚ã€ã‚°ãƒ«ãƒ¼ãƒ—å†…ã§æ¤œç´¢ã§ãã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:507 #, python-format msgid "User %s can't be added to the group because the user doesn't exist" -msgstr "" +msgstr "ユーザ %s ã¯å˜åœ¨ã—ãªã„ãŸã‚ã€å½“該グループã«è¿½åŠ ã§ãã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format msgid "The group at dn %s doesn't exist" -msgstr "" +msgstr "è˜åˆ¥å(DN) %s ã®ã‚°ãƒ«ãƒ¼ãƒ—ã¯å˜åœ¨ã—ã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:513 #, python-format msgid "User %(uid)s is already a member of the group %(group_dn)s" -msgstr "" +msgstr "ユーザ %(uid)s ã¯ã™ã§ã«ã‚°ãƒ«ãƒ¼ãƒ— %(group_dn)s ã®ãƒ¡ãƒ³ãƒã§ã™ã€‚" #: ../nova/auth/ldapdriver.py:524 #, python-format msgid "" "User %s can't be removed from the group because the user doesn't exist" -msgstr "" +msgstr "ユーザ %s ã¯å˜åœ¨ã—ãªã„ãŸã‚ã€å½“該グループã‹ã‚‰å‰Šé™¤ã§ãã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:528 #, python-format msgid "User %s is not a member of the group" -msgstr "" +msgstr "ユーザ %s ã¯å½“該グループã®ãƒ¡ãƒ³ãƒã§ã¯ã‚りã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:542 #, python-format @@ -2780,7 +2818,7 @@ msgstr "ã‚°ãƒ«ãƒ¼ãƒ—ã®æœ€å¾Œã®ãƒ¡ãƒ³ãƒãƒ¼ã‚’削除ã—よã†ã¨ã—ã¾ã—ãŸã€‚ #: ../nova/auth/ldapdriver.py:549 #, python-format msgid "User %s can't be removed from all because the user doesn't exist" -msgstr "" +msgstr "ユーザ %s ã¯å˜åœ¨ã—ãªã„ãŸã‚ã€å…¨ãƒãƒ¼ãƒ«ã‹ã‚‰ã®å‰Šé™¤ã¯ã§ãã¾ã›ã‚“。" #: ../nova/auth/ldapdriver.py:564 #, python-format @@ -2810,22 +2848,22 @@ msgstr 
"Deleting user: ユーザ %s を削除ã—ã¾ã™ã€‚" #: ../nova/api/ec2/admin.py:127 #, python-format msgid "Adding role %(role)s to user %(user)s for project %(project)s" -msgstr "" +msgstr "プãƒã‚¸ã‚§ã‚¯ãƒˆ %(project)s ã®ãƒ¦ãƒ¼ã‚¶ %(user)s ã«ãƒãƒ¼ãƒ« %(role)s を付与ã—ã¾ã™ã€‚" #: ../nova/api/ec2/admin.py:131 #, python-format msgid "Adding sitewide role %(role)s to user %(user)s" -msgstr "" +msgstr "ユーザ %(user)s ã«ã‚µã‚¤ãƒˆå…±é€šãªãƒãƒ¼ãƒ« %(role)s ã‚’è¿½åŠ ä¸" #: ../nova/api/ec2/admin.py:137 #, python-format msgid "Removing role %(role)s from user %(user)s for project %(project)s" -msgstr "" +msgstr "プãƒã‚¸ã‚§ã‚¯ãƒˆ %(project)s ã®ãƒ¦ãƒ¼ã‚¶ %(user)s ã‹ã‚‰ãƒãƒ¼ãƒ« %(role)s を削除ã—ã¾ã™ã€‚" #: ../nova/api/ec2/admin.py:141 #, python-format msgid "Removing sitewide role %(role)s from user %(user)s" -msgstr "" +msgstr "ユーザ %(user)s ã‹ã‚‰ã‚µã‚¤ãƒˆå…±é€šãªãƒãƒ¼ãƒ« %(role)s を削除ä¸" #: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 msgid "operation must be add or remove" @@ -2834,17 +2872,17 @@ msgstr "operation 㯠add ã¾ãŸã¯ remove ã®ä½•れã‹ã§ã‚ã‚‹å¿…è¦ãŒã‚りã #: ../nova/api/ec2/admin.py:159 #, python-format msgid "Getting x509 for user: %(name)s on project: %(project)s" -msgstr "" +msgstr "プãƒã‚¸ã‚§ã‚¯ãƒˆ %(project)s ã®ãƒ¦ãƒ¼ã‚¶ %(name)s ã® x509 証明書をå–å¾—ã—ã¾ã™ã€‚" #: ../nova/api/ec2/admin.py:177 #, python-format msgid "Create project %(name)s managed by %(manager_user)s" -msgstr "" +msgstr "管ç†è€… %(manager_user)s ã«ã‚ˆã£ã¦ç®¡ç†ã•れるプãƒã‚¸ã‚§ã‚¯ãƒˆ %(name)s を作æˆã—ã¾ã™ã€‚" #: ../nova/api/ec2/admin.py:190 #, python-format msgid "Modify project: %(name)s managed by %(manager_user)s" -msgstr "" +msgstr "管ç†è€… %(manager_user)s ã«ã‚ˆã£ã¦ç®¡ç†ã•れるプãƒã‚¸ã‚§ã‚¯ãƒˆ %(name)s を変更ã—ã¾ã™ã€‚" #: ../nova/api/ec2/admin.py:200 #, python-format @@ -2854,12 +2892,12 @@ msgstr "Delete project: プãƒã‚¸ã‚§ã‚¯ãƒˆ %s を削除ã—ã¾ã—ãŸã€‚" #: ../nova/api/ec2/admin.py:214 #, python-format msgid "Adding user %(user)s to project %(project)s" -msgstr "" +msgstr "ユーザ %(user)s をプãƒã‚¸ã‚§ã‚¯ãƒˆ %(project)s ã«è¿½åŠ ã—ã¾ã™ã€‚" 
#: ../nova/api/ec2/admin.py:218 #, python-format msgid "Removing user %(user)s from project %(project)s" -msgstr "" +msgstr "ユーザ %(user)s をプãƒã‚¸ã‚§ã‚¯ãƒˆ %(project)s ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" #, python-format #~ msgid "" diff --git a/po/pt_BR.po b/po/pt_BR.po index 887c32597..f067a69e0 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -14,8 +14,8 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-25 05:22+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -8,20 +8,20 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-03-30 07:06+0000\n" -"Last-Translator: Andrey Olykainen <Unknown>\n" +"PO-Revision-Date: 2011-07-09 07:20+0000\n" +"Last-Translator: ilya kislicyn <Unknown>\n" "Language-Team: Russian <ru@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-31 05:58+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 #: ../nova/scheduler/simple.py:122 msgid "No hosts found" -msgstr "" +msgstr "Узлы не найдены" #: ../nova/exception.py:33 msgid "Unexpected error while running command." @@ -54,7 +54,7 @@ msgstr "" #: ../nova/volume/api.py:47 #, python-format msgid "Volume quota exceeded. You cannot create a volume of size %sG" -msgstr "" +msgstr "Квота тома превышена. 
Ð’Ñ‹ не можете Ñоздать том размером %sG" #: ../nova/volume/api.py:71 ../nova/volume/api.py:96 msgid "Volume status must be available" @@ -62,19 +62,19 @@ msgstr "" #: ../nova/volume/api.py:98 msgid "Volume is already attached" -msgstr "" +msgstr "Том уже Ñмотирован" #: ../nova/volume/api.py:104 msgid "Volume is already detached" -msgstr "" +msgstr "Том уже отмонтирован" #: ../nova/api/openstack/servers.py:72 msgid "Failed to read private ip" -msgstr "" +msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð¸Ð²Ð°Ñ‚Ð½Ð¾Ð³Ð¾ IP адреÑа" #: ../nova/api/openstack/servers.py:79 msgid "Failed to read public ip(s)" -msgstr "" +msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¿ÑƒÐ±Ð»Ð¸Ñ‡Ð½Ñ‹Ñ… IP адреÑов" #: ../nova/api/openstack/servers.py:152 #, python-format @@ -83,7 +83,7 @@ msgstr "" #: ../nova/api/openstack/servers.py:168 msgid "No keypairs defined" -msgstr "" +msgstr "Ðе определены ключевые пары" #: ../nova/api/openstack/servers.py:238 #, python-format diff --git a/po/tl.po b/po/tl.po new file mode 100644 index 000000000..7f600e8f1 --- /dev/null +++ b/po/tl.po @@ -0,0 +1,2855 @@ +# Tagalog translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-02-17 03:24+0000\n" +"Last-Translator: John Michael Baterna <Unknown>\n" +"Language-Team: Tagalog <tl@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" + +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Kailangan bang gumamit ng CA bawat proyekto?" 
+ +#: ../nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: ../nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" + +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:91 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:95 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "" + +#: ../nova/compute/manager.py:180 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#. 
pylint: disable=W0702 +#: ../nova/compute/manager.py:219 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:311 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: ../nova/compute/manager.py:316 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:332 +#, python-format +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:335 +#, python-format +msgid "instance %s: setting admin password" +msgstr "" + +#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" + +#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: ../nova/compute/manager.py:387 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: ../nova/compute/manager.py:406 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: 
../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "" + +#: ../nova/compute/manager.py:553 +#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: ../nova/compute/manager.py:585 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" + +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:406 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: ../nova/utils.py:58 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: ../nova/utils.py:222 +#, python-format +msgid "Running %s" +msgstr "" + +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: ../nova/utils.py:374 +#, python-format +msgid "backend %s" +msgstr "" + +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:246 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" +msgstr "" + +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:397 +#, python-format +msgid "PV Kernel in VDI:%s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:442 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:463 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:465 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:542 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:567 +#, python-format +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:574 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr 
"" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in 
pool %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: 
../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" + +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: ../nova/api/ec2/cloud.py:450 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:629 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 +#, python-format +msgid "Release address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:771 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:780 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "" + +#: ../nova/api/ec2/cloud.py:815 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: ../nova/api/ec2/cloud.py:867 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid 
"only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "" + +#: ../nova/api/ec2/cloud.py:908 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" +msgstr "" + +#: ../bin/nova-api.py:57 +#, python-format +msgid "No paste configuration for app: %s" +msgstr "" + +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" + +#: ../bin/nova-api.py:64 +#, python-format +msgid "Running %s API" +msgstr "" + +#: ../bin/nova-api.py:69 +#, python-format +msgid "No known API applications configured in %s." +msgstr "" + +#: ../bin/nova-api.py:83 +#, python-format +msgid "Starting nova-api node (version %s)" +msgstr "" + +#: ../bin/nova-api.py:89 +#, python-format +msgid "No paste configuration found for: %s" +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 +#, python-format +msgid "Argument %(key)s value %(value)s is too short." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 +#, python-format +msgid "Argument %(key)s value %(value)s contains invalid characters." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 +#, python-format +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 +#, python-format +msgid "Argument %s is required." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." 
+msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:67 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:73 +#, python-format +msgid "instance %(name)s: not enough free memory" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:148 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:151 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:162 +#, python-format +msgid "Invalid value for onset_files: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:167 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:180 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:232 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:269 +#, python-format +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:280 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 +#, python-format +msgid "" +"TIMEOUT: The call to %(method)s timed out. 
VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:569 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: ../nova/image/s3.py:99 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:131 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: ../nova/api/ec2/__init__.py:207 +#, python-format +msgid "action: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:281 +#, python-format +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:314 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:317 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: ../nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:55 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: ../nova/virt/disk.py:91 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: ../nova/virt/disk.py:124 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" + +#: ../doc/ext/nova_todo.py:46 +#, python-format +msgid "%(filename)s, line %(line_info)d" +msgstr "" + +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: ../nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" + +#: ../nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: ../nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: ../nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:276 +#, python-format +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "" + +#: ../nova/virt/hyperv.py:286 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:288 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: 
../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "" + +#: ../nova/virt/hyperv.py:361 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:386 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:393 +#, python-format +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" + +#: ../nova/virt/hyperv.py:415 +#, python-format +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" + +#: ../nova/virt/hyperv.py:451 +#, python-format +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/compute/api.py:71 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: ../nova/compute/api.py:77 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: ../nova/compute/api.py:97 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" + +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 +#, python-format +msgid "Going to run %s instances..." 
+msgstr "" + +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" + +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" +msgstr "" + +#: ../nova/compute/api.py:296 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: ../nova/compute/api.py:301 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: ../nova/compute/api.py:481 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 +#, python-format +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" + +#: ../nova/rpc.py:103 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Hindi maka-konekta sa AMQP server pagkatapos ng %d ulit. Isasara na ang " +"sistema." + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Muling kumonekta sa queue" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "Hindi nai-abot ang mensahe buhat sa queue" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "natanggap %s" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. 
back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "walang paraan para sa mensahe: %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "Walang paraan para sa mensahe: %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "" + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:364 +#, python-format +msgid "response %s" +msgstr "" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. 
No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/virt/fake.py:239 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/network/manager.py:153 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" 
+msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: ../nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:273 +#, 
python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:276 +#, python-format +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:296 +#, python-format +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:299 +#, python-format +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:318 +#, python-format +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:396 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: ../nova/objectstore/handler.py:404 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:409 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: ../nova/objectstore/handler.py:423 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:431 +#, python-format +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" + +#. 
other attributes imply update +#: ../nova/objectstore/handler.py:436 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:450 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" + +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:423 +#, python-format +msgid 
"Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:448 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:515 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:592 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: ../nova/auth/manager.py:659 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:671 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" 
+msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:472 +#, python-format +msgid "Group can't be created because group %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:478 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:495 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" +msgstr "" + +#: ../nova/auth/ldapdriver.py:542 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: ../nova/auth/ldapdriver.py:549 +#, python-format +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:564 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project 
%(project)s" +msgstr "" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "Hindi makita o maabot ang AMQP server sa %s:%d. Muling subukan sa %d segundo." @@ -14,8 +14,8 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n" -"X-Generator: Launchpad (build 12559)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 diff --git a/po/zh_CN.po b/po/zh_CN.po index 9690356f5..c3d292a93 100644 --- a/po/zh_CN.po +++ b/po/zh_CN.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-04-07 05:01+0000\n" -"Last-Translator: ben <Unknown>\n" +"PO-Revision-Date: 2011-06-14 14:44+0000\n" +"Last-Translator: chong <Unknown>\n" "Language-Team: Chinese (Simplified) <zh_CN@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-04-08 05:28+0000\n" -"X-Generator: Launchpad (build 12735)\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" #: ../nova/twistd.py:266 #, python-format @@ -26,7 +26,7 @@ msgstr "å¯åЍ %s ä¸" #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 #: ../nova/scheduler/simple.py:122 msgid "No hosts found" -msgstr "未找到主机" +msgstr "没有找到主机" #: ../nova/exception.py:33 msgid "Unexpected error while running command." 
@@ -41,6 +41,11 @@ msgid "" "Stdout: %(stdout)r\n" "Stderr: %(stderr)r" msgstr "" +"%(description)s\n" +"命令: %(cmd)s\n" +"退出代ç : %(exit_code)s\n" +"æ ‡å‡†è¾“å‡º: %(stdout)r\n" +"æ ‡å‡†å‡ºé”™: %(stderr)r" #: ../nova/exception.py:107 msgid "DB exception wrapped" @@ -309,17 +314,17 @@ msgstr "" #: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format msgid "Terminating instance %s" -msgstr "" +msgstr "æ£åœ¨ç»“æŸå®žä¾‹ %s" #: ../nova/compute/manager.py:255 #, python-format msgid "Deallocating address %s" -msgstr "" +msgstr "å–æ¶ˆåˆ†é…åœ°å€ %s" #: ../nova/compute/manager.py:268 #, python-format msgid "trying to destroy already destroyed instance: %s" -msgstr "" +msgstr "å°è¯•销æ¯å·²ç»é”€æ¯çš„实例: %s" #: ../nova/compute/manager.py:282 #, python-format @@ -331,12 +336,12 @@ msgstr "é‡å¯è™šæ‹Ÿæœº %s" msgid "" "trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " "expected: %(running)s)" -msgstr "" +msgstr "å°è¯•é‡å¯æ²¡æœ‰åœ¨è¿è¡Œä¸å®žä¾‹: %(instance_id)s (状æ€: %(state)s 预料: %(running)s)" #: ../nova/compute/manager.py:311 #, python-format msgid "instance %s: snapshotting" -msgstr "" +msgstr "实例 %s: å¿«ç…§ä¸" #: ../nova/compute/manager.py:316 #, python-format @@ -351,6 +356,8 @@ msgid "" "trying to reset the password on a non-running instance: %(instance_id)s " "(state: %(instance_state)s expected: %(expected_state)s)" msgstr "" +"å°è¯•对没有在è¿è¡Œçš„实例é‡ç½®å¯†ç : %(instance_id)s (状æ€: %(instance_state)s 预料: " +"%(expected_state)s)" #: ../nova/compute/manager.py:335 #, python-format diff --git a/po/zh_TW.po b/po/zh_TW.po new file mode 100644 index 000000000..ad14c0e32 --- /dev/null +++ b/po/zh_TW.po @@ -0,0 +1,2848 @@ +# Chinese (Traditional) translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-03-25 07:14+0000\n" +"Last-Translator: Hugo Kou <Unknown>\n" +"Language-Team: Chinese (Traditional) <zh_TW@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" +"X-Generator: Launchpad (build 13405)\n" + +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "找ä¸åˆ°ä¸»æ©Ÿ" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "éžé 期的執行錯誤" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "Volume è¶…éŽé™åˆ¶ã€‚無法創建 volume å¤§å° %sG" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "Volume 狀態需è¦å¯è¢«ä½¿ç”¨" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "Volume 已掛載" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "Volume 已經å¸è¼‰" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "讀å–private ip 失敗" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "讀å–public ip 失敗" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "沒有定義的金鑰(keypair)é…å°" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "pidfile %s ä¸å˜åœ¨. 
Daemon未啟動?\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "沒有æ¤ä¸€ç¨‹åº" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "æ£åœ¨å•Ÿå‹• %s" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "找ä¸åˆ°è™›æ“¬æ©Ÿå™¨ %s" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "掛載_Volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "無法掛載Volume 到虛擬機器 %s" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "掛載點 %(mountpoint)s 掛載到虛擬機器 %(instance_name)s" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "å¸è¼‰_Volume: %(instance_name)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "找ä¸åˆ°Volume %s" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "無法å¸è¼‰ Volume %s" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "掛載點 %(mountpoint)s 從虛擬機器 %(instance_name)s å¸è¼‰" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "æœªçŸ¥çš„è™›æ“¬æ©Ÿå™¨è¦æ ¼: %s" + +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Private key ç§é‘°æª”案å稱" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" 
+msgstr "" + +#: ../nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: ../nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" + +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:91 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:95 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "" + +#: ../nova/compute/manager.py:180 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#. 
pylint: disable=W0702 +#: ../nova/compute/manager.py:219 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:311 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: ../nova/compute/manager.py:316 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:332 +#, python-format +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:335 +#, python-format +msgid "instance %s: setting admin password" +msgstr "" + +#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" + +#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: ../nova/compute/manager.py:387 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: ../nova/compute/manager.py:406 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: 
../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "" + +#: ../nova/compute/manager.py:553 +#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: ../nova/compute/manager.py:585 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" + +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: 
../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:406 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: ../nova/utils.py:58 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: ../nova/utils.py:222 +#, python-format +msgid "Running %s" +msgstr "" + +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: ../nova/utils.py:374 +#, python-format +msgid "backend %s" +msgstr "" + +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:246 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" +msgstr "" + +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:397 +#, python-format +msgid "PV Kernel in VDI:%s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:442 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:463 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:465 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:542 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:567 +#, python-format +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:574 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr 
"" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in 
pool %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: 
../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" + +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: ../nova/api/ec2/cloud.py:450 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:629 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 +#, python-format +msgid "Release address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:771 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:780 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "" + +#: ../nova/api/ec2/cloud.py:815 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: ../nova/api/ec2/cloud.py:867 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid 
"only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "" + +#: ../nova/api/ec2/cloud.py:908 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" +msgstr "" + +#: ../bin/nova-api.py:57 +#, python-format +msgid "No paste configuration for app: %s" +msgstr "" + +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" + +#: ../bin/nova-api.py:64 +#, python-format +msgid "Running %s API" +msgstr "" + +#: ../bin/nova-api.py:69 +#, python-format +msgid "No known API applications configured in %s." +msgstr "" + +#: ../bin/nova-api.py:83 +#, python-format +msgid "Starting nova-api node (version %s)" +msgstr "" + +#: ../bin/nova-api.py:89 +#, python-format +msgid "No paste configuration found for: %s" +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 +#, python-format +msgid "Argument %(key)s value %(value)s is too short." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 +#, python-format +msgid "Argument %(key)s value %(value)s contains invalid characters." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 +#, python-format +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 +#, python-format +msgid "Argument %s is required." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." 
+msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:67 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:73 +#, python-format +msgid "instance %(name)s: not enough free memory" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:148 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:151 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:162 +#, python-format +msgid "Invalid value for onset_files: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:167 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:180 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:232 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:269 +#, python-format +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:280 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 +#, python-format +msgid "" +"TIMEOUT: The call to %(method)s timed out. 
VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:569 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: ../nova/image/s3.py:99 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:131 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: ../nova/api/ec2/__init__.py:207 +#, python-format +msgid "action: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:281 +#, python-format +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:314 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:317 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: ../nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:55 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: ../nova/virt/disk.py:91 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: ../nova/virt/disk.py:124 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" + +#: ../doc/ext/nova_todo.py:46 +#, python-format +msgid "%(filename)s, line %(line_info)d" +msgstr "" + +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: ../nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" + +#: ../nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: ../nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: ../nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:276 +#, python-format +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "" + +#: ../nova/virt/hyperv.py:286 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:288 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: 
../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "" + +#: ../nova/virt/hyperv.py:361 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:386 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:393 +#, python-format +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" + +#: ../nova/virt/hyperv.py:415 +#, python-format +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" + +#: ../nova/virt/hyperv.py:451 +#, python-format +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/compute/api.py:71 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: ../nova/compute/api.py:77 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: ../nova/compute/api.py:97 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" + +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 +#, python-format +msgid "Going to run %s instances..." 
+msgstr "" + +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" + +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" +msgstr "" + +#: ../nova/compute/api.py:296 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: ../nova/compute/api.py:301 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: ../nova/compute/api.py:481 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 +#, python-format +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" + +#: ../nova/rpc.py:103 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." 
+msgstr "" + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:364 +#, python-format +msgid "response %s" +msgstr "" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/virt/fake.py:239 +#, 
python-format +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/network/manager.py:153 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... 
" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: ../nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to 
access bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:273 +#, python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:276 +#, python-format +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:296 +#, python-format +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:299 +#, python-format +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:318 +#, python-format +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:396 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: ../nova/objectstore/handler.py:404 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:409 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: ../nova/objectstore/handler.py:423 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:431 +#, python-format +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" + +#. 
other attributes imply update +#: ../nova/objectstore/handler.py:436 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:450 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" + +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:423 +#, python-format +msgid 
"Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:448 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:515 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:592 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: ../nova/auth/manager.py:659 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:671 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" 
+msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:472 +#, python-format +msgid "Group can't be created because group %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:478 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:495 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" +msgstr "" + +#: ../nova/auth/ldapdriver.py:542 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: ../nova/auth/ldapdriver.py:549 +#, python-format +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:564 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project 
%(project)s" +msgstr "" diff --git a/run_tests.sh b/run_tests.sh index 2ea221ae3..8f2b51757 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -6,10 +6,12 @@ function usage { echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" - echo " -r, --recreate-db Recreate the test database." + echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." + echo " -n, --no-recreate-db Don't recreate the test database." echo " -x, --stop Stop running tests after the first error or failure." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -p, --pep8 Just run pep8" + echo " -c, --coverage Generate coverage report" echo " -h, --help Print this usage message" echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" echo "" @@ -25,8 +27,10 @@ function process_option { -V|--virtual-env) let always_venv=1; let never_venv=0;; -N|--no-virtual-env) let always_venv=0; let never_venv=1;; -r|--recreate-db) let recreate_db=1;; + -n|--no-recreate-db) let recreate_db=0;; -f|--force) let force=1;; -p|--pep8) let just_pep8=1;; + -c|--coverage) let coverage=1;; -*) noseopts="$noseopts $1";; *) noseargs="$noseargs $1" esac @@ -41,12 +45,18 @@ noseargs= noseopts= wrapper="" just_pep8=0 -recreate_db=0 +coverage=0 +recreate_db=1 for arg in "$@"; do process_option $arg done +# If enabled, tell nose to collect coverage data +if [ $coverage -eq 1 ]; then + noseopts="$noseopts --with-coverage --cover-package=nova" +fi + function run_tests { # Just run the test suites in current environment ${wrapper} $NOSETESTS 2> run_tests.log @@ -106,13 +116,18 @@ then fi fi +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $recreate_db -eq 1 ]; then 
- rm tests.sqlite + rm -f tests.sqlite fi run_tests || exit @@ -124,3 +139,8 @@ run_tests || exit if [ -z "$noseargs" ]; then run_pep8 fi + +if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + ${wrapper} coverage html -d covhtml -i +fi diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py index 268d9865b..454f6f1d5 100644 --- a/smoketests/test_sysadmin.py +++ b/smoketests/test_sysadmin.py @@ -103,27 +103,48 @@ class ImageTests(base.UserSmokeTestCase): 'launchPermission') self.assert_(attrs.name, 'launch_permission') - def test_009_can_modify_image_launch_permission(self): + def test_009_can_add_image_launch_permission(self): + image = self.conn.get_image(self.data['image_id']) + self.assertEqual(image.id, self.data['image_id']) + self.assertEqual(image.is_public, False) self.conn.modify_image_attribute(image_id=self.data['image_id'], operation='add', attribute='launchPermission', groups='all') image = self.conn.get_image(self.data['image_id']) self.assertEqual(image.id, self.data['image_id']) + self.assertEqual(image.is_public, True) def test_010_can_see_launch_permission(self): attrs = self.conn.get_image_attribute(self.data['image_id'], 'launchPermission') - self.assert_(attrs.name, 'launch_permission') - self.assert_(attrs.attrs['groups'][0], 'all') + self.assertEqual(attrs.name, 'launch_permission') + self.assertEqual(attrs.attrs['groups'][0], 'all') + + def test_011_can_remove_image_launch_permission(self): + image = self.conn.get_image(self.data['image_id']) + self.assertEqual(image.id, self.data['image_id']) + self.assertEqual(image.is_public, True) + self.conn.modify_image_attribute(image_id=self.data['image_id'], + operation='remove', + attribute='launchPermission', + groups='all') + image = self.conn.get_image(self.data['image_id']) + self.assertEqual(image.id, self.data['image_id']) + self.assertEqual(image.is_public, False) + + def test_012_private_image_shows_in_list(self): + images = 
self.conn.get_all_images() + image_ids = [image.id for image in images] + self.assertTrue(self.data['image_id'] in image_ids) - def test_011_user_can_deregister_kernel(self): + def test_013_user_can_deregister_kernel(self): self.assertTrue(self.conn.deregister_image(self.data['kernel_id'])) - def test_012_can_deregister_image(self): + def test_014_can_deregister_image(self): self.assertTrue(self.conn.deregister_image(self.data['image_id'])) - def test_013_can_delete_bundle(self): + def test_015_can_delete_bundle(self): self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET)) diff --git a/tools/clean-vlans b/tools/clean-vlans index 820a9dbe5..a26ad86ad 100755 --- a/tools/clean-vlans +++ b/tools/clean-vlans @@ -17,9 +17,9 @@ # License for the specific language governing permissions and limitations # under the License. -export LC_ALL=C +export LC_ALL=C sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo brctl delbr foo -sudo ifconfig -a | grep vlan | grep -v vlan124 | grep -v vlan5 | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down -sudo ifconfig -a | grep vlan | grep -v vlan124 | grep -v vlan5 | cut -f1 -d" " | xargs -n1 -ifoo vconfig rem foo +sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down +sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo vconfig rem foo diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py index 13b0f8d33..97b5302ba 100644 --- a/tools/esx/guest_tool.py +++ b/tools/esx/guest_tool.py @@ -21,6 +21,7 @@ On Windows we require pyWin32 installed on Python. """
import array
+import gettext
import logging
import os
import platform
@@ -30,6 +31,8 @@ import subprocess import sys
import time
+gettext.install('nova', unicode=1)
+
PLATFORM_WIN = 'win32'
PLATFORM_LINUX = 'linux2'
ARCH_32_BIT = '32bit'
@@ -275,7 +278,8 @@ def _filter_duplicates(all_entries): return final_list
-def _set_rhel_networking(network_details=[]):
+def _set_rhel_networking(network_details=None):
+ network_details = network_details or []
all_dns_servers = []
for network_detail in network_details:
mac_address, ip_address, subnet_mask, gateway, broadcast,\
@@ -315,6 +319,46 @@ def _set_rhel_networking(network_details=[]): _execute(['/sbin/service', 'network', 'restart'])
+def _set_ubuntu_networking(network_details=None):
+ network_details = network_details or []
+ """ Set IPv4 network settings for Ubuntu """
+ all_dns_servers = []
+ for network_detail in network_details:
+ mac_address, ip_address, subnet_mask, gateway, broadcast,\
+ dns_servers = network_detail
+ all_dns_servers.extend(dns_servers)
+ adapter_name, current_ip_address = \
+ _get_linux_adapter_name_and_ip_address(mac_address)
+
+ if adapter_name and not ip_address == current_ip_address:
+ interface_file_name = \
+ '/etc/network/interfaces'
+ # Remove file
+ os.remove(interface_file_name)
+ # Touch file
+ _execute(['touch', interface_file_name])
+ interface_file = open(interface_file_name, 'w')
+ interface_file.write('\nauto %s' % adapter_name)
+ interface_file.write('\niface %s inet static' % adapter_name)
+ interface_file.write('\nbroadcast %s' % broadcast)
+ interface_file.write('\ngateway %s' % gateway)
+ interface_file.write('\nnetmask %s' % subnet_mask)
+ interface_file.write('\naddress %s' % ip_address)
+ interface_file.close()
+ if all_dns_servers:
+ dns_file_name = "/etc/resolv.conf"
+ os.remove(dns_file_name)
+ _execute(['touch', dns_file_name])
+ dns_file = open(dns_file_name, 'w')
+ dns_file.write("; generated by OpenStack guest tools")
+ unique_entries = _filter_duplicates(all_dns_servers)
+ for dns_server in unique_entries:
+ dns_file.write("\nnameserver %s" % dns_server)
+ dns_file.close()
+ print "\nRestarting networking....\n"
+ _execute(['/etc/init.d/networking', 'restart'])
+
+
def _linux_set_networking():
"""Set IP address for the Linux VM."""
vmware_tools_bin = None
@@ -330,8 +374,13 @@ def _linux_set_networking(): cmd = [vmware_tools_bin, '--cmd', 'machine.id.get']
network_details = _parse_network_details(_execute(cmd,
check_exit_code=False))
- # TODO(sateesh): For other distros like ubuntu, suse, debian, BSD, etc.
- _set_rhel_networking(network_details)
+ # TODO(sateesh): For other distros like suse, debian, BSD, etc.
+ if(platform.dist()[0] == 'Ubuntu'):
+ _set_ubuntu_networking(network_details)
+ elif (platform.dist()[0] == 'redhat'):
+ _set_rhel_networking(network_details)
+ else:
+ logging.warn(_("Distro '%s' not supported") % platform.dist()[0])
else:
logging.warn(_("VMware Tools is not installed"))
diff --git a/tools/nova-debug b/tools/nova-debug index 3ff68ca35..0a78af16a 100755 --- a/tools/nova-debug +++ b/tools/nova-debug @@ -30,13 +30,15 @@ cd $INSTANCES_PATH/$1 if [ $CMD != "umount" ] && [ $CMD != "launch" ]; then # destroy the instance virsh destroy $1 +virsh undefine $1 # mount the filesystem mkdir t -DEVICE=`losetup --show -f disk` +DEVICE=/dev/nbd0 echo $DEVICE -kpartx -a $DEVICE -mount /dev/mapper/${DEVICE:4}p1 t +qemu-nbd -c $DEVICE disk +sleep 3 +mount $DEVICE t fi if [ $CMD != "mount" ] && [ $CMD != "umount" ]; then @@ -66,11 +68,13 @@ sed -i "s/<serial type=\"file\">.*<\/serial>/<serial type=\"pty\"><source path=\ umount t -virsh create debug.xml +virsh define debug.xml +virsh start $1 virsh console $1 virsh destroy $1 +virsh undefine $1 -mount /dev/mapper/${DEVICE:4}p1 t +mount $DEVICE t # clear debug root password chroot t passwd -l root @@ -83,10 +87,11 @@ if [ $CMD != "mount" ] && [ $CMD != "launch" ]; then # unmount the filesystem umount t -kpartx -d $DEVICE -losetup -d $DEVICE +qemu-nbd -d $DEVICE rmdir t # recreate the instance -virsh create libvirt.xml +virsh define libvirt.xml +virsh start $1 fi + diff --git a/tools/pip-requires b/tools/pip-requires index 6e686b7e7..dec93c351 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -9,7 +9,7 @@ boto==1.9b carrot==0.10.5 eventlet lockfile==0.8 -python-novaclient==2.5.3 +python-novaclient==2.5.7 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 |