375 files changed, 35733 insertions, 5676 deletions
diff --git a/.bzrignore b/.bzrignore
index b271561a3..d22b62629 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -13,3 +13,4 @@ CA/serial*
 CA/newcerts/*.pem
 CA/private/cakey.pem
 nova/vcsversion.py
+*.DS_Store
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -1,35 +1,46 @@
 # Format is:
-# <preferred e-mail> <other e-mail>
-<code@term.ie> <github@anarkystic.com>
-<code@term.ie> <termie@preciousroy.local>
+# <preferred e-mail> <other e-mail 1>
+# <preferred e-mail> <other e-mail 2>
+<anotherjesse@gmail.com> <jesse@dancelamb>
+<anotherjesse@gmail.com> <jesse@gigantor.local>
+<anotherjesse@gmail.com> <jesse@ubuntu>
+<ant@openstack.org> <amesserl@rackspace.com>
 <Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
-<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
-<matt.dietz@rackspace.com> <mdietz@openstack>
+<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
+<bschott@isi.edu> <bfschott@gmail.com>
 <cbehrens@codestud.com> <chris.behrens@rackspace.com>
+<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
+<code@term.ie> <github@anarkystic.com>
+<code@term.ie> <termie@preciousroy.local>
+<corywright@gmail.com> <cory.wright@rackspace.com>
 <devin.carlen@gmail.com> <devcamcar@illian.local>
 <ewan.mellor@citrix.com> <emellor@silver>
+<itoumsn@nttdata.co.jp> <itoumsn@shayol>
 <jaypipes@gmail.com> <jpipes@serialcoder>
-<anotherjesse@gmail.com> <jesse@dancelamb>
-<anotherjesse@gmail.com> <jesse@gigantor.local>
-<anotherjesse@gmail.com> <jesse@ubuntu>
-<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
 <jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
+<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
 <jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
+<josh@jk0.org> <josh.kearney@rackspace.com>
 <justin@fathomdb.com> <justinsb@justinsb-desktop>
-<masumotok@nttdata.co.jp> <root@openstack2-api>
+<justin@fathomdb.com> <superstack@superstack.org>
 <masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
+<masumotok@nttdata.co.jp> <root@openstack2-api>
+<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
+<matt.dietz@rackspace.com> <mdietz@openstack>
 <mordred@inaugust.com> <mordred@hudson>
-<paul@openstack.org> <pvoccio@castor.local>
 <paul@openstack.org> <paul.voccio@rackspace.com>
+<paul@openstack.org> <pvoccio@castor.local>
+<rconradharris@gmail.com> <rick.harris@rackspace.com>
+<rlane@wikimedia.org> <laner@controller>
+<sleepsonthefloor@gmail.com> <root@tonbuntu>
 <soren.hansen@rackspace.com> <soren@linux2go.dk>
 <todd@ansolabs.com> <todd@lapex>
 <todd@ansolabs.com> <todd@rubidine.com>
-<vishvananda@gmail.com> <vishvananda@yahoo.com>
+<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
+<ueno.nachi@lab.ntt.co.jp> <nati.ueno@gmail.com>
+<ueno.nachi@lab.ntt.co.jp> <nova@u4>
+<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
 <vishvananda@gmail.com> <root@mirror.nasanebula.net>
 <vishvananda@gmail.com> <root@ubuntu>
-<sleepsonthefloor@gmail.com> <root@tonbuntu>
-<rlane@wikimedia.org> <laner@controller>
-<rconradharris@gmail.com> <rick.harris@rackspace.com>
-<corywright@gmail.com> <cory.wright@rackspace.com>
-<ant@openstack.org> <amesserl@rackspace.com>
-<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
+<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
+<vishvananda@gmail.com> <vishvananda@yahoo.com>
diff --git a/Authors b/Authors
--- a/Authors
+++ b/Authors
@@ -3,13 +3,17 @@ Anne Gentle <anne@openstack.org>
 Anthony Young <sleepsonthefloor@gmail.com>
 Antony Messerli <ant@openstack.org>
 Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
+Bilal Akhtar <bilalakhtar@ubuntu.com>
+Brian Lamar <brian.lamar@rackspace.com>
+Brian Schott <bschott@isi.edu>
+Brian Waldon <brian.waldon@rackspace.com>
 Chiradeep Vittal <chiradeep@cloud.com>
 Chmouel Boudjnah <chmouel@chmouel.com>
 Chris Behrens <cbehrens@codestud.com>
 Christian Berendt <berendt@b1-systems.de>
 Cory Wright <corywright@gmail.com>
-David Pravec <David.Pravec@danix.org>
 Dan Prince <dan.prince@rackspace.com>
+David Pravec <David.Pravec@danix.org>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Ed Leafe <ed@leafe.com>
@@ -27,20 +31,24 @@ John Dewey <john@dewey.ws>
 Jonathan Bryce <jbryce@jbryce.com>
 Jordan Rinke <jordan@openstack.org>
 Josh Durgin <joshd@hq.newdream.net>
-Josh Kearney <josh.kearney@rackspace.com>
+Josh Kearney <josh@jk0.org>
 Joshua McKenty <jmckenty@gmail.com>
 Justin Santa Barbara <justin@fathomdb.com>
 Kei Masumoto <masumotok@nttdata.co.jp>
 Ken Pepple <ken.pepple@gmail.com>
+Kevin L. Mitchell <kevin.mitchell@rackspace.com>
 Koji Iida <iida.koji@lab.ntt.co.jp>
 Lorin Hochstein <lorin@isi.edu>
+Masanori Itoh <itoumsn@nttdata.co.jp>
 Matt Dietz <matt.dietz@rackspace.com>
 Michael Gundlach <michael.gundlach@rackspace.com>
 Monsyne Dragon <mdragon@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 MORITA Kazutaka <morita.kazutaka@gmail.com>
 Muneyuki Noguchi <noguchimn@nttdata.co.jp>
-Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
+Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
+Naveed Massjouni <naveedm9@gmail.com>
+Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
 Paul Voccio <paul@openstack.org>
 Ricardo Carrillo Cruz <emaildericky@gmail.com>
 Rick Clark <rick@openstack.org>
@@ -54,7 +62,8 @@ Soren Hansen <soren.hansen@rackspace.com>
 Thierry Carrez <thierry@openstack.org>
 Todd Willey <todd@ansolabs.com>
 Trey Morris <trey.morris@rackspace.com>
-Tushar Patil <tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
+Tushar Patil <tushar.vitthal.patil@gmail.com>
+Vasiliy Shlykov <vash@vasiliyshlykov.org>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>
diff --git a/HACKING b/HACKING
--- a/HACKING
+++ b/HACKING
@@ -47,3 +47,22 @@ Human Alphabetical Order Examples
   from nova.auth import users
   from nova.endpoint import api
   from nova.endpoint import cloud
+
+Docstrings
+----------
+  """Summary of the function, class or method, less than 80 characters.
+
+  New paragraph after newline that explains in more detail any general
+  information about the function, class or method. After this, if defining
+  parameters and return types use the Sphinx format. After that an extra
+  newline then close the quotations.
+
+  When writing the docstring for a class, an extra line should be placed
+  after the closing quotations. For more in-depth explanations for these
+  decisions see http://www.python.org/dev/peps/pep-0257/
+
+  :param foo: the foo parameter
+  :param bar: the bar parameter
+  :returns: description of the return value
+
+  """
diff --git a/MANIFEST.in b/MANIFEST.in
index 3908830d7..2ceed34f3 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -6,14 +6,23 @@ graft doc
 graft smoketests
 graft tools
 graft etc
+graft bzrplugins
+graft contrib
+graft po
+graft plugins
 include nova/api/openstack/notes.txt
+include nova/auth/*.schema
 include nova/auth/novarc.template
+include nova/auth/opendj.sh
 include nova/auth/slap.sh
 include nova/cloudpipe/bootscript.sh
 include nova/cloudpipe/client.ovpn.template
+include nova/cloudpipe/bootscript.template
 include nova/compute/fakevirtinstance.xml
 include nova/compute/interfaces.template
+include nova/console/xvp.conf.template
 include nova/db/sqlalchemy/migrate_repo/migrate.cfg
+include nova/db/sqlalchemy/migrate_repo/README
 include nova/virt/interfaces.template
 include nova/virt/libvirt*.xml.template
 include nova/tests/CA/
@@ -25,6 +34,8 @@ include nova/tests/bundle/1mb.manifest.xml
 include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
 include nova/tests/bundle/1mb.part.0
 include nova/tests/bundle/1mb.part.1
+include nova/tests/db/nova.austin.sqlite
 include plugins/xenapi/README
 include plugins/xenapi/etc/xapi.d/plugins/objectstore
 include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+global-exclude *.pyc
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index 15cd6cb76..000000000
--- a/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index 2bc407658..bbd60bade 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -25,7 +25,6 @@ from eventlet.green import urllib2
 import exceptions
 import gettext
-import logging
 import os
 import sys
 import time
 
@@ -48,9 +47,11 @@ from nova import utils
 from nova import wsgi
 
 FLAGS = flags.FLAGS
-
 flags.DEFINE_integer('ajax_console_idle_timeout', 300,
                      'Seconds before idle connection destroyed')
+flags.DEFINE_flag(flags.HelpFlag())
+flags.DEFINE_flag(flags.HelpshortFlag())
+flags.DEFINE_flag(flags.HelpXMLFlag())
 
 LOG = logging.getLogger('nova.ajax_console_proxy')
 LOG.setLevel(logging.DEBUG)
@@ -62,10 +63,16 @@ class AjaxConsoleProxy(object):
     def __call__(self, env, start_response):
         try:
-            req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
-                                        env['HTTP_HOST'],
-                                        env['PATH_INFO'],
-                                        env['QUERY_STRING'])
+            if 'QUERY_STRING' in env:
+                req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
+                                            env['HTTP_HOST'],
+                                            env['PATH_INFO'],
+                                            env['QUERY_STRING'])
+            else:
+                req_url = '%s://%s%s' % (env['wsgi.url_scheme'],
+                                         env['HTTP_HOST'],
+                                         env['PATH_INFO'])
+
             if 'HTTP_REFERER' in env:
                 auth_url = env['HTTP_REFERER']
             else:
@@ -130,6 +137,7 @@ class AjaxConsoleProxy(object):
 if __name__ == '__main__':
     utils.default_flagfile()
     FLAGS(sys.argv)
+    logging.setup()
     server = wsgi.Server()
     acp = AjaxConsoleProxy()
     acp.register_listeners()
diff --git a/bin/nova-api b/bin/nova-api
index 11176a021..06bb855cb 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -36,54 +36,26 @@ gettext.install('nova', unicode=1)
 
 from nova import flags
 from nova import log as logging
+from nova import service
+from nova import utils
 from nova import version
 from nova import wsgi
 
-logging.basicConfig()
+
 LOG = logging.getLogger('nova.api')
-LOG.setLevel(logging.DEBUG)
 
 FLAGS = flags.FLAGS
-API_ENDPOINTS = ['ec2', 'osapi']
-
-
-def run_app(paste_config_file):
-    LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file)
-    apps = []
-    for api in API_ENDPOINTS:
-        config = wsgi.load_paste_configuration(paste_config_file, api)
-        if config is None:
-            LOG.debug(_("No paste configuration for app: %s"), api)
-            continue
-        LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
-        wsgi.paste_config_to_flags(config, {
-            "verbose": FLAGS.verbose,
-            "%s_host" % api: config.get('host', '0.0.0.0'),
-            "%s_port" % api: getattr(FLAGS, "%s_port" % api)})
-        LOG.info(_("Running %s API"), api)
-        app = wsgi.load_paste_app(paste_config_file, api)
-        apps.append((app, getattr(FLAGS, "%s_port" % api),
-                     getattr(FLAGS, "%s_host" % api)))
-    if len(apps) == 0:
-        LOG.error(_("No known API applications configured in %s."),
-                  paste_config_file)
-        return
-
-    # NOTE(todd): redo logging config, verbose could be set in paste config
-    logging.basicConfig()
-    server = wsgi.Server()
-    for app in apps:
-        server.start(*app)
-    server.wait()
-
 
 if __name__ == '__main__':
+    utils.default_flagfile()
     FLAGS(sys.argv)
+    logging.setup()
     LOG.audit(_("Starting nova-api node (version %s)"),
               version.version_string_with_vcs())
-    conf = wsgi.paste_config_file('nova-api.conf')
-    if conf:
-        run_app(conf)
-    else:
-        LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf')
+    LOG.debug(_("Full set of FLAGS:"))
+    for flag in FLAGS:
+        flag_get = FLAGS.get(flag, None)
+        LOG.debug("%(flag)s : %(flag_get)s" % locals())
+
+    service = service.serve_wsgi(service.ApiService)
+    service.wait()
diff --git a/bin/nova-combined b/bin/nova-combined
deleted file mode 100755
index 913c866bf..000000000
--- a/bin/nova-combined
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Combined starter script for Nova services."""
-
-import eventlet
-eventlet.monkey_patch()
-
-import gettext
-import os
-import sys
-
-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
-                                   os.pardir,
-                                   os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
-    sys.path.insert(0, possible_topdir)
-
-gettext.install('nova', unicode=1)
-
-from nova import flags
-from nova import log as logging
-from nova import service
-from nova import utils
-from nova import wsgi
-
-
-FLAGS = flags.FLAGS
-
-
-if __name__ == '__main__':
-    utils.default_flagfile()
-    FLAGS(sys.argv)
-    logging.basicConfig()
-
-    compute = service.Service.create(binary='nova-compute')
-    network = service.Service.create(binary='nova-network')
-    volume = service.Service.create(binary='nova-volume')
-    scheduler = service.Service.create(binary='nova-scheduler')
-    #objectstore = service.Service.create(binary='nova-objectstore')
-
-    service.serve(compute, network, volume, scheduler)
-
-    apps = []
-    paste_config_file = wsgi.paste_config_file('nova-api.conf')
-    for api in ['osapi', 'ec2']:
-        config = wsgi.load_paste_configuration(paste_config_file, api)
-        if config is None:
-            continue
-        wsgi.paste_config_to_flags(config, {
-            "verbose": FLAGS.verbose,
-            "%s_host" % api: config.get('host', '0.0.0.0'),
-            "%s_port" % api: getattr(FLAGS, "%s_port" % api)})
-        app = wsgi.load_paste_app(paste_config_file, api)
-        apps.append((app, getattr(FLAGS, "%s_port" % api),
-                     getattr(FLAGS, "%s_host" % api)))
-    if len(apps) > 0:
-        logging.basicConfig()
-        server = wsgi.Server()
-        for app in apps:
-            server.start(*app)
-        server.wait()
diff --git a/bin/nova-compute b/bin/nova-compute
index d2d352da2..95fa393b1 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 gettext.install('nova', unicode=1)
 
+from nova import flags
+from nova import log as logging
 from nova import service
 from nova import utils
 
 if __name__ == '__main__':
     utils.default_flagfile()
+    flags.FLAGS(sys.argv)
+    logging.setup()
     service.serve()
     service.wait()
diff --git a/bin/nova-console b/bin/nova-console
index 802cc80b6..40608b995 100755
--- a/bin/nova-console
+++ b/bin/nova-console
@@ -35,10 +35,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 gettext.install('nova', unicode=1)
 
+from nova import flags
+from nova import log as logging
 from nova import service
 from nova import utils
 
 if __name__ == '__main__':
     utils.default_flagfile()
+    flags.FLAGS(sys.argv)
+    logging.setup()
     service.serve()
     service.wait()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index d38ba2543..3dd9de367 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -102,19 +102,10 @@ def main():
     flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
     utils.default_flagfile(flagfile)
     argv = FLAGS(sys.argv)
-    logging.basicConfig()
+    logging.setup()
     interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
     if int(os.environ.get('TESTING', '0')):
-        FLAGS.fake_rabbit = True
-        FLAGS.network_size = 16
-        FLAGS.connection_type = 'fake'
-        FLAGS.fake_network = True
-        FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
-        FLAGS.num_networks = 5
-        path = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                            '..',
-                                            'nova.sqlite'))
-        FLAGS.sql_connection = 'sqlite:///%s' % path
+        from nova.tests import fake_flags
     action = argv[1]
     if action in ['add', 'del', 'old']:
         mac = argv[2]
diff --git a/bin/nova-direct-api b/bin/nova-direct-api
index 173b39bdb..bf29d9a5e 100755
--- a/bin/nova-direct-api
+++ b/bin/nova-direct-api
@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 gettext.install('nova', unicode=1)
 
 from nova import flags
+from nova import log as logging
 from nova import utils
 from nova import wsgi
 from nova.api import direct
@@ -44,10 +45,15 @@ from nova.compute import api as compute_api
 FLAGS = flags.FLAGS
 flags.DEFINE_integer('direct_port', 8001, 'Direct API port')
 flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host')
+flags.DEFINE_flag(flags.HelpFlag())
+flags.DEFINE_flag(flags.HelpshortFlag())
+flags.DEFINE_flag(flags.HelpXMLFlag())
+
 
 if __name__ == '__main__':
     utils.default_flagfile()
     FLAGS(sys.argv)
+    logging.setup()
 
     direct.register_service('compute', compute_api.API())
     direct.register_service('reflect', direct.Reflection())
diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore
index 036b41e48..404ae37f4 100755
--- a/bin/nova-import-canonical-imagestore
+++ b/bin/nova-import-canonical-imagestore
@@ -41,6 +41,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 gettext.install('nova', unicode=1)
 
 from nova import flags
+from nova import log as logging
 from nova import utils
 from nova.objectstore import image
 
@@ -92,6 +93,7 @@ def main():
     """Main entry point."""
     utils.default_flagfile()
     argv = FLAGS(sys.argv)
+    logging.setup()
     images = get_images()
 
     if len(argv) == 2:
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index 7dca02014..24cc9fd23 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -41,9 +41,6 @@ from nova import utils
 from nova import twistd
 from nova.compute import monitor
 
-# TODO(todd): shouldn't this be done with flags? And what about verbose?
-logging.getLogger('boto').setLevel(logging.WARN)
-
 LOG = logging.getLogger('nova.instancemonitor')
diff --git a/bin/nova-manage b/bin/nova-manage
index 7835ca551..e001552d5 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -55,6 +55,8 @@
 import datetime
 import gettext
+import glob
+import json
 import os
 import re
 import sys
@@ -81,13 +83,12 @@ from nova import log as logging
 from nova import quota
 from nova import rpc
 from nova import utils
-from nova.api.ec2.cloud import ec2_id_to_id
+from nova.api.ec2 import ec2utils
 from nova.auth import manager
 from nova.cloudpipe import pipelib
+from nova.compute import instance_types
 from nova.db import migration
 
-
-logging.basicConfig()
 FLAGS = flags.FLAGS
 flags.DECLARE('fixed_range', 'nova.network.manager')
 flags.DECLARE('num_networks', 'nova.network.manager')
@@ -95,6 +96,10 @@ flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('vlan_start', 'nova.network.manager')
 flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+flags.DECLARE('images_path', 'nova.image.local')
+flags.DEFINE_flag(flags.HelpFlag())
+flags.DEFINE_flag(flags.HelpshortFlag())
+flags.DEFINE_flag(flags.HelpXMLFlag())
 
 
 def param2id(object_id):
@@ -102,7 +107,7 @@ def param2id(object_id):
     args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
     """
     if '-' in object_id:
-        return ec2_id_to_id(object_id)
+        return ec2utils.ec2_id_to_id(object_id)
     else:
         return int(object_id)
@@ -433,6 +438,37 @@ class ProjectCommands(object):
                   "nova-api server on this host.")
 
 
+class FixedIpCommands(object):
+    """Class for managing fixed ip."""
+
+    def list(self, host=None):
+        """Lists all fixed ips (optionally by host) arguments: [host]"""
+        ctxt = context.get_admin_context()
+        if host == None:
+            fixed_ips = db.fixed_ip_get_all(ctxt)
+        else:
+            fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host)
+
+        print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'),
+                                                  _('IP address'),
+                                                  _('MAC address'),
+                                                  _('hostname'),
+                                                  _('host'))
+        for fixed_ip in fixed_ips:
+            hostname = None
+            host = None
+            mac_address = None
+            if fixed_ip['instance']:
+                instance = fixed_ip['instance']
+                hostname = instance['hostname']
+                host = instance['host']
+                mac_address = instance['mac_address']
+            print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
+                                    fixed_ip['network']['cidr'],
+                                    fixed_ip['address'],
+                                    mac_address, hostname, host)
+
+
 class FloatingIpCommands(object):
     """Class for managing floating ip."""
 
@@ -472,8 +508,8 @@ class NetworkCommands(object):
     """Class for managing networks."""
 
     def create(self, fixed_range=None, num_networks=None,
-               network_size=None, vlan_start=None, vpn_start=None,
-               fixed_range_v6=None):
+               network_size=None, vlan_start=None,
+               vpn_start=None, fixed_range_v6=None, label='public'):
         """Creates fixed ips for host by range
         arguments: [fixed_range=FLAG], [num_networks=FLAG],
                    [network_size=FLAG], [vlan_start=FLAG],
@@ -495,9 +531,31 @@ class NetworkCommands(object):
                           cidr=fixed_range,
                           num_networks=int(num_networks),
                           network_size=int(network_size),
-                          cidr_v6=fixed_range_v6,
                           vlan_start=int(vlan_start),
-                          vpn_start=int(vpn_start))
+                          vpn_start=int(vpn_start),
+                          cidr_v6=fixed_range_v6,
+                          label=label)
+
+    def list(self):
+        """List all created networks"""
+        print "%-18s\t%-15s\t%-15s\t%-15s" % (_('network'),
+                                              _('netmask'),
+                                              _('start address'),
+                                              'DNS')
+        for network in db.network_get_all(context.get_admin_context()):
+            print "%-18s\t%-15s\t%-15s\t%-15s" % (network.cidr,
+                                                  network.netmask,
+                                                  network.dhcp_start,
+                                                  network.dns)
+
+    def delete(self, fixed_range):
+        """Deletes a network"""
+        network = db.network_get_by_cidr(context.get_admin_context(), \
+                                         fixed_range)
+        if network.project_id is not None:
+            raise ValueError(_('Network must be disassociated from project %s'
+                               ' before delete' % network.project_id))
+        db.network_delete_safe(context.get_admin_context(), network.id)
 
 
 class ServiceCommands(object):
@@ -508,7 +566,7 @@ class ServiceCommands(object):
         args: [host] [service]"""
         ctxt = context.get_admin_context()
         now = datetime.datetime.utcnow()
-        services = db.service_get_all(ctxt)
+        services = db.service_get_all(ctxt) + db.service_get_all(ctxt, True)
         if host:
             services = [s for s in services if s['host'] == host]
         if service:
@@ -579,6 +637,13 @@ class VolumeCommands(object):
         ctxt = context.get_admin_context()
         volume = db.volume_get(ctxt, param2id(volume_id))
         host = volume['host']
+
+        if not host:
+            print "Volume not yet assigned to host."
+            print "Deleting volume from database and skipping rpc."
+            db.volume_destroy(ctxt, param2id(volume_id))
+            return
+
         if volume['status'] == 'in-use':
             print "Volume is in-use."
             print "Detach volume from instance and then try again."
@@ -609,18 +674,244 @@ class VolumeCommands(object):
                      "mountpoint": volume['mountpoint']}})
 
 
+class InstanceTypeCommands(object):
+    """Class for managing instance types / flavors."""
+
+    def _print_instance_types(self, n, val):
+        deleted = ('', ', inactive')[val["deleted"] == 1]
+        print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, "
+               "Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % (
+               n, val["memory_mb"], val["vcpus"], val["local_gb"],
+               val["flavorid"], val["swap"], val["rxtx_quota"],
+               val["rxtx_cap"], deleted)
+
+    def create(self, name, memory, vcpus, local_gb, flavorid,
+               swap=0, rxtx_quota=0, rxtx_cap=0):
+        """Creates instance types / flavors
+        arguments: name memory vcpus local_gb flavorid [swap] [rxtx_quota]
+                   [rxtx_cap]
+        """
+        try:
+            instance_types.create(name, memory, vcpus, local_gb,
+                                  flavorid, swap, rxtx_quota, rxtx_cap)
+        except exception.InvalidInputException:
+            print "Must supply valid parameters to create instance type"
+            print e
+            sys.exit(1)
+        except exception.DBError, e:
+            print "DB Error: %s" % e
+            sys.exit(2)
+        except:
+            print "Unknown error"
+            sys.exit(3)
+        else:
+            print "%s created" % name
+
+    def delete(self, name, purge=None):
+        """Marks instance types / flavors as deleted
+        arguments: name"""
+        try:
+            if purge == "--purge":
+                instance_types.purge(name)
+                verb = "purged"
+            else:
+                instance_types.destroy(name)
+                verb = "deleted"
+        except exception.ApiError:
+            print "Valid instance type name is required"
+            sys.exit(1)
+        except exception.DBError, e:
+            print "DB Error: %s" % e
+            sys.exit(2)
+        except:
+            sys.exit(3)
+        else:
+            print "%s %s" % (name, verb)
+
+    def list(self, name=None):
+        """Lists all active or specific instance types / flavors
+        arguments: [name]"""
+        try:
+            if name == None:
+                inst_types = instance_types.get_all_types()
+            elif name == "--all":
+                inst_types = instance_types.get_all_types(1)
+            else:
+                inst_types = instance_types.get_instance_type(name)
+        except exception.DBError, e:
+            _db_error(e)
+        if isinstance(inst_types.values()[0], dict):
+            for k, v in inst_types.iteritems():
+                self._print_instance_types(k, v)
+        else:
+            self._print_instance_types(name, inst_types)
+
+
+class ImageCommands(object):
+    """Methods for dealing with a cloud in an odd state"""
+
+    def __init__(self, *args, **kwargs):
+        self.image_service = utils.import_object(FLAGS.image_service)
+
+    def _register(self, image_type, disk_format, container_format,
+                  path, owner, name=None, is_public='T',
+                  architecture='x86_64', kernel_id=None, ramdisk_id=None):
+        meta = {'is_public': True,
+                'name': name,
+                'disk_format': disk_format,
+                'container_format': container_format,
+                'properties': {'image_state': 'available',
+                               'owner': owner,
+                               'type': image_type,
+                               'architecture': architecture,
+                               'image_location': 'local',
+                               'is_public': (is_public == 'T')}}
+        print image_type, meta
+        if kernel_id:
+            meta['properties']['kernel_id'] = int(kernel_id)
+        if ramdisk_id:
+            meta['properties']['ramdisk_id'] = int(ramdisk_id)
+        elevated = context.get_admin_context()
+        try:
+            with open(path) as ifile:
+                image = self.image_service.create(elevated, meta, ifile)
+            new = image['id']
+            print _("Image registered to %(new)s (%(new)08x).") % locals()
+            return new
+        except Exception as exc:
+            print _("Failed to register %(path)s: %(exc)s") % locals()
+
+    def all_register(self, image, kernel, ramdisk, owner, name=None,
+                     is_public='T', architecture='x86_64'):
+        """Uploads an image, kernel, and ramdisk into the image_service
+        arguments: image kernel ramdisk owner [name] [is_public='T']
+                   [architecture='x86_64']"""
+        kernel_id = self.kernel_register(kernel, owner, None,
+                                         is_public, architecture)
+        ramdisk_id = self.ramdisk_register(ramdisk, owner, None,
+                                           is_public, architecture)
+        self.image_register(image, owner, name, is_public,
+                            architecture, kernel_id, ramdisk_id)
+
+    def image_register(self, path, owner, name=None, is_public='T',
+                       architecture='x86_64', kernel_id=None, ramdisk_id=None,
+                       disk_format='ami', container_format='ami'):
+        """Uploads an image into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+                   [kernel_id=None] [ramdisk_id=None]
+                   [disk_format='ami'] [container_format='ami']"""
+        return self._register('machine', disk_format, container_format, path,
+                              owner, name, is_public, architecture,
+                              kernel_id, ramdisk_id)
+
+    def kernel_register(self, path, owner, name=None, is_public='T',
+                        architecture='x86_64'):
+        """Uploads a kernel into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+        """
+        return self._register('kernel', 'aki', 'aki', path, owner, name,
+                              is_public, architecture)
+
+    def ramdisk_register(self, path, owner, name=None, is_public='T',
+                         architecture='x86_64'):
+        """Uploads a ramdisk into the image_service
+        arguments: path owner [name] [is_public='T'] [architecture='x86_64']
+        """
+        return self._register('ramdisk', 'ari', 'ari', path, owner, name,
+                              is_public, architecture)
+
+    def _lookup(self, old_image_id):
+        try:
+            internal_id = ec2utils.ec2_id_to_id(old_image_id)
+            image = self.image_service.show(context, internal_id)
+        except exception.NotFound:
+            image = self.image_service.show_by_name(context, old_image_id)
+        return image['id']
+
+    def _old_to_new(self, old):
+        mapping = {'machine': 'ami',
+                   'kernel': 'aki',
+                   'ramdisk': 'ari'}
+        container_format = mapping[old['type']]
+        disk_format = container_format
+        new = {'disk_format': disk_format,
+               'container_format': container_format,
+               'is_public': True,
+               'name': old['imageId'],
+               'properties': {'image_state': old['imageState'],
+                              'owner': old['imageOwnerId'],
+                              'architecture': old['architecture'],
+                              'type': old['type'],
+                              'image_location': old['imageLocation'],
+                              'is_public': old['isPublic']}}
+        if old.get('kernelId'):
+            new['properties']['kernel_id'] = self._lookup(old['kernelId'])
+        if old.get('ramdiskId'):
+            new['properties']['ramdisk_id'] = self._lookup(old['ramdiskId'])
+        return new
+
+    def _convert_images(self, images):
+        elevated = context.get_admin_context()
+        for image_path, image_metadata in images.iteritems():
+            meta = self._old_to_new(image_metadata)
+            old = meta['name']
+            try:
+                with open(image_path) as ifile:
+                    image = self.image_service.create(elevated, meta, ifile)
+                new = image['id']
+                print _("Image %(old)s converted to " \
+                        "%(new)s (%(new)08x).") % locals()
+            except Exception as exc:
+                print _("Failed to convert %(old)s: %(exc)s") % locals()
+
+    def convert(self, directory):
+        """Uploads old objectstore images in directory to new service
+        arguments: directory"""
+        machine_images = {}
+        other_images = {}
+        directory = os.path.abspath(directory)
+        # NOTE(vish): If we're importing from the images path dir, attempt
+        #             to move the files out of the way before importing
+        #             so we aren't writing to the same directory. This
+        #             may fail if the dir was a mointpoint.
+        if (FLAGS.image_service == 'nova.image.local.LocalImageService'
+            and directory == os.path.abspath(FLAGS.images_path)):
+            new_dir = "%s_bak" % directory
+            os.move(directory, new_dir)
+            os.mkdir(directory)
+            directory = new_dir
+        for fn in glob.glob("%s/*/info.json" % directory):
+            try:
+                image_path = os.path.join(fn.rpartition('/')[0], 'image')
+                with open(fn) as metadata_file:
+                    image_metadata = json.load(metadata_file)
+                if image_metadata['type'] == 'machine':
+                    machine_images[image_path] = image_metadata
+                else:
+                    other_images[image_path] = image_metadata
+            except Exception as exc:
+                print _("Failed to load %(fn)s.") % locals()
+        # NOTE(vish): do kernels and ramdisks first so images
+        self._convert_images(other_images)
+        self._convert_images(machine_images)
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
     ('role', RoleCommands),
     ('shell', ShellCommands),
     ('vpn', VpnCommands),
+    ('fixed', FixedIpCommands),
     ('floating', FloatingIpCommands),
     ('network', NetworkCommands),
     ('service', ServiceCommands),
     ('log', LogCommands),
     ('db', DbCommands),
-    ('volume', VolumeCommands)]
+    ('volume', VolumeCommands),
+    ('instance_type', InstanceTypeCommands),
+    ('image', ImageCommands),
+    ('flavor', InstanceTypeCommands)]
 
 
 def lazy_match(name, key_value_tuples):
@@ -658,6 +949,7 @@ def main():
     """Parse options and call the appropriate class/method."""
     utils.default_flagfile()
     argv = FLAGS(sys.argv)
+    logging.setup()
     script_name = argv.pop(0)
 
     if len(argv) < 1:
diff --git a/bin/nova-network b/bin/nova-network
index 0143846a7..101761ef7 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 gettext.install('nova', unicode=1)
 
+from nova import flags
+from nova import log as logging
 from nova import service
 from nova import utils
 
 if __name__ == '__main__':
     utils.default_flagfile()
+    flags.FLAGS(sys.argv)
+    logging.setup()
     service.serve()
     service.wait()
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index f4c0eaed6..0c205a80f 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 gettext.install('nova', unicode=1)
 
+from nova import flags
+from nova import log as logging
 from nova import service
 from nova import utils
 
 if __name__ == '__main__':
     utils.default_flagfile()
+    flags.FLAGS(sys.argv)
+    logging.setup()
     service.serve()
     service.wait()
diff --git a/bin/nova-volume b/bin/nova-volume
index ad3ddc405..8dcdbc500 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 gettext.install('nova', unicode=1)
 
+from nova import flags
+from nova import log as logging
 from nova import service
 from nova import utils
 
 if __name__ == '__main__':
     utils.default_flagfile()
+    flags.FLAGS(sys.argv)
+    logging.setup()
     service.serve()
     service.wait()
diff --git a/contrib/nova.sh b/contrib/nova.sh
index 9259035ca..d6c9b1081 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -66,7 +66,7 @@ if [ "$CMD" == "install" ]; then
     sudo apt-get install -y user-mode-linux kvm libvirt-bin
     sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
     sudo apt-get install -y lvm2 iscsitarget open-iscsi
-    sudo apt-get install -y socat
+    sudo apt-get install -y socat unzip
    echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
     sudo /etc/init.d/iscsitarget restart
     sudo modprobe kvm
@@ -111,8 +111,7 @@ if [ "$CMD" == "run" ]; then
 --nodaemon
 --dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
 --network_manager=nova.network.manager.$NET_MAN
---cc_host=$HOST_IP
---routing_source_ip=$HOST_IP
+--my_ip=$HOST_IP
 --sql_connection=$SQL_CONN
 --auth_driver=nova.auth.$AUTH
 --libvirt_type=$LIBVIRT_TYPE
@@ -151,7 +150,6 @@ NOVA_CONF_EOF
     mkdir -p $NOVA_DIR/instances
     rm -rf $NOVA_DIR/networks
     mkdir -p $NOVA_DIR/networks
-    $NOVA_DIR/tools/clean-vlans
     if [ ! -d "$NOVA_DIR/images" ]; then
         ln -s $DIR/images $NOVA_DIR/images
     fi
@@ -168,11 +166,12 @@ NOVA_CONF_EOF
     $NOVA_DIR/bin/nova-manage user admin admin admin admin
     # create a project called 'admin' with project manager of 'admin'
     $NOVA_DIR/bin/nova-manage project create admin admin
-    # export environment variables for project 'admin' and user 'admin'
-    $NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc
     # create a small network
    $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
 
+    # create some floating ips
+    $NOVA_DIR/bin/nova-manage floating create `hostname` 10.6.0.0/27
+
     # nova api crashes if we start it with a regular screen command,
     # so send the start command by forcing text into the window.
     screen_it api "$NOVA_DIR/bin/nova-api"
@@ -182,6 +181,11 @@ NOVA_CONF_EOF
     screen_it scheduler "$NOVA_DIR/bin/nova-scheduler"
     screen_it volume "$NOVA_DIR/bin/nova-volume"
     screen_it ajax_console_proxy "$NOVA_DIR/bin/nova-ajax-console-proxy"
+    sleep 2
+    # export environment variables for project 'admin' and user 'admin'
+    $NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
+    unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
+
     screen_it test ". $NOVA_DIR/novarc"
     screen -S nova -x
 fi
diff --git a/contrib/puppet/files/etc/default/nova-compute b/contrib/puppet/files/etc/default/nova-compute
deleted file mode 100644
index 8bd7d091c..000000000
--- a/contrib/puppet/files/etc/default/nova-compute
+++ /dev/null
@@ -1 +0,0 @@
-ENABLED=true
diff --git a/contrib/puppet/files/etc/default/nova-volume b/contrib/puppet/files/etc/default/nova-volume
deleted file mode 100644
index 8bd7d091c..000000000
--- a/contrib/puppet/files/etc/default/nova-volume
+++ /dev/null
@@ -1 +0,0 @@
-ENABLED=true
diff --git a/contrib/puppet/files/etc/issue b/contrib/puppet/files/etc/issue
deleted file mode 100644
index 8c567221b..000000000
--- a/contrib/puppet/files/etc/issue
+++ /dev/null
@@ -1,5 +0,0 @@
------------------------------------------------
-
-    Welcome to your OpenStack installation!
-
------------------------------------------------
diff --git a/contrib/puppet/files/etc/libvirt/qemu.conf b/contrib/puppet/files/etc/libvirt/qemu.conf
deleted file mode 100644
index 7839f12e5..000000000
--- a/contrib/puppet/files/etc/libvirt/qemu.conf
+++ /dev/null
@@ -1,170 +0,0 @@
-# Master configuration file for the QEMU driver.
-# All settings described here are optional - if omitted, sensible
-# defaults are used.
-
-# VNC is configured to listen on 127.0.0.1 by default.
-# To make it listen on all public interfaces, uncomment
-# this next option.
-#
-# NB, strong recommendation to enable TLS + x509 certificate
-# verification when allowing public access
-#
-# vnc_listen = "0.0.0.0"
-
-
-# Enable use of TLS encryption on the VNC server. This requires
-# a VNC client which supports the VeNCrypt protocol extension.
-# Examples include vinagre, virt-viewer, virt-manager and vencrypt
-# itself. UltraVNC, RealVNC, TightVNC do not support this
-#
-# It is necessary to setup CA and issue a server certificate
-# before enabling this.
-#
-# vnc_tls = 1
-
-
-# Use of TLS requires that x509 certificates be issued. The
-# default it to keep them in /etc/pki/libvirt-vnc. This directory
-# must contain
-#
-#  ca-cert.pem - the CA master certificate
-#  server-cert.pem - the server certificate signed with ca-cert.pem
-#  server-key.pem - the server private key
-#
-# This option allows the certificate directory to be changed
-#
-# vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
-
-
-# The default TLS configuration only uses certificates for the server
-# allowing the client to verify the server's identity and establish
-# and encrypted channel.
-#
-# It is possible to use x509 certificates for authentication too, by
-# issuing a x509 certificate to every client who needs to connect.
-#
-# Enabling this option will reject any client who does not have a
-# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
-#
-# vnc_tls_x509_verify = 1
-
-
-# The default VNC password. Only 8 letters are significant for
-# VNC passwords. This parameter is only used if the per-domain
-# XML config does not already provide a password. To allow
-# access without passwords, leave this commented out. An empty
-# string will still enable passwords, but be rejected by QEMU
-# effectively preventing any use of VNC. Obviously change this
-# example here before you set this
-#
-# vnc_password = "XYZ12345"
-
-
-# Enable use of SASL encryption on the VNC server. This requires
-# a VNC client which supports the SASL protocol extension.
-# Examples include vinagre, virt-viewer and virt-manager
-# itself. UltraVNC, RealVNC, TightVNC do not support this
-#
-# It is necessary to configure /etc/sasl2/qemu.conf to choose
-# the desired SASL plugin (eg, GSSPI for Kerberos)
-#
-# vnc_sasl = 1
-
-
-# The default SASL configuration file is located in /etc/sasl2/
-# When running libvirtd unprivileged, it may be desirable to
-# override the configs in this location. Set this parameter to
-# point to the directory, and create a qemu.conf in that location
-#
-# vnc_sasl_dir = "/some/directory/sasl2"
-
-
-
-
-# The default security driver is SELinux. If SELinux is disabled
-# on the host, then the security driver will automatically disable
-# itself. If you wish to disable QEMU SELinux security driver while
-# leaving SELinux enabled for the host in general, then set this
-# to 'none' instead
-#
-# security_driver = "selinux"
-
-
-# The user ID for QEMU processes run by the system instance
-user = "root"
-
-# The group ID for QEMU processes run by the system instance
-group = "root"
-
-# Whether libvirt should dynamically change file ownership
-# to match the configured user/group above. Defaults to 1.
-# Set to 0 to disable file ownership changes.
-#dynamic_ownership = 1
-
-
-# What cgroup controllers to make use of with QEMU guests
-#
-#  - 'cpu' - use for schedular tunables
-#  - 'devices' - use for device whitelisting
-#
-# NB, even if configured here, they won't be used unless
-# the adminsitrator has mounted cgroups. eg
-#
-#  mkdir /dev/cgroup
-#  mount -t cgroup -o devices,cpu none /dev/cgroup
-#
-# They can be mounted anywhere, and different controlers
-# can be mounted in different locations. libvirt will detect
-# where they are located.
-#
-# cgroup_controllers = [ "cpu", "devices" ]
-
-# This is the basic set of devices allowed / required by
-# all virtual machines.
-#
-# As well as this, any configured block backed disks,
-# all sound device, and all PTY devices are allowed.
-# -# This will only need setting if newer QEMU suddenly -# wants some device we don't already know a bout. -# -#cgroup_device_acl = [ -# "/dev/null", "/dev/full", "/dev/zero", -# "/dev/random", "/dev/urandom", -# "/dev/ptmx", "/dev/kvm", "/dev/kqemu", -# "/dev/rtc", "/dev/hpet", "/dev/net/tun", -#] - -# The default format for Qemu/KVM guest save images is raw; that is, the -# memory from the domain is dumped out directly to a file. If you have -# guests with a large amount of memory, however, this can take up quite -# a bit of space. If you would like to compress the images while they -# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz" -# for save_image_format. Note that this means you slow down the process of -# saving a domain in order to save disk space; the list above is in descending -# order by performance and ascending order by compression ratio. -# -# save_image_format = "raw" - -# If provided by the host and a hugetlbfs mount point is configured, -# a guest may request huge page backing. When this mount point is -# unspecified here, determination of a host mount point in /proc/mounts -# will be attempted. Specifying an explicit mount overrides detection -# of the same in /proc/mounts. Setting the mount point to "" will -# disable guest hugepage backing. -# -# NB, within this mount point, guests will create memory backing files -# in a location of $MOUNTPOINT/libvirt/qemu - -# hugetlbfs_mount = "/dev/hugepages" - -# mac_filter enables MAC addressed based filtering on bridge ports. -# This currently requires ebtables to be installed. -# -# mac_filter = 1 - -# By default, PCI devices below non-ACS switch are not allowed to be assigned -# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to -# be assigned to guests. -# -# relaxed_acs_check = 1 diff --git a/contrib/puppet/files/etc/lvm/lvm.conf b/contrib/puppet/files/etc/lvm/lvm.conf deleted file mode 100644 index 4e814ad49..000000000 --- a/contrib/puppet/files/etc/lvm/lvm.conf +++ /dev/null @@ -1,463 +0,0 @@ -# This is an example configuration file for the LVM2 system. -# It contains the default settings that would be used if there was no -# /etc/lvm/lvm.conf file. -# -# Refer to 'man lvm.conf' for further information including the file layout. -# -# To put this file in a different directory and override /etc/lvm set -# the environment variable LVM_SYSTEM_DIR before running the tools. - - -# This section allows you to configure which block devices should -# be used by the LVM system. -devices { - - # Where do you want your volume groups to appear ? - dir = "/dev" - - # An array of directories that contain the device nodes you wish - # to use with LVM2. - scan = [ "/dev" ] - - # If several entries in the scanned directories correspond to the - # same block device and the tools need to display a name for device, - # all the pathnames are matched against each item in the following - # list of regular expressions in turn and the first match is used. - preferred_names = [ ] - - # Try to avoid using undescriptive /dev/dm-N names, if present. - # preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ] - - # A filter that tells LVM2 to only use a restricted set of devices. - # The filter consists of an array of regular expressions. These - # expressions can be delimited by a character of your choice, and - # prefixed with either an 'a' (for accept) or 'r' (for reject). 
- # The first expression found to match a device name determines if - # the device will be accepted or rejected (ignored). Devices that - # don't match any patterns are accepted. - - # Be careful if there there are symbolic links or multiple filesystem - # entries for the same device as each name is checked separately against - # the list of patterns. The effect is that if any name matches any 'a' - # pattern, the device is accepted; otherwise if any name matches any 'r' - # pattern it is rejected; otherwise it is accepted. - - # Don't have more than one filter line active at once: only one gets used. - - # Run vgscan after you change this parameter to ensure that - # the cache file gets regenerated (see below). - # If it doesn't do what you expect, check the output of 'vgscan -vvvv'. - - - # By default we accept every block device: - filter = [ "r|/dev/etherd/.*|", "r|/dev/block/.*|", "a/.*/" ] - - # Exclude the cdrom drive - # filter = [ "r|/dev/cdrom|" ] - - # When testing I like to work with just loopback devices: - # filter = [ "a/loop/", "r/.*/" ] - - # Or maybe all loops and ide drives except hdc: - # filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ] - - # Use anchors if you want to be really specific - # filter = [ "a|^/dev/hda8$|", "r/.*/" ] - - # The results of the filtering are cached on disk to avoid - # rescanning dud devices (which can take a very long time). - # By default this cache is stored in the /etc/lvm/cache directory - # in a file called '.cache'. - # It is safe to delete the contents: the tools regenerate it. - # (The old setting 'cache' is still respected if neither of - # these new ones is present.) - cache_dir = "/etc/lvm/cache" - cache_file_prefix = "" - - # You can turn off writing this cache file by setting this to 0. - write_cache_state = 1 - - # Advanced settings. - - # List of pairs of additional acceptable block device types found - # in /proc/devices with maximum (non-zero) number of partitions. - # types = [ "fd", 16 ] - - # If sysfs is mounted (2.6 kernels) restrict device scanning to - # the block devices it believes are valid. - # 1 enables; 0 disables. - sysfs_scan = 1 - - # By default, LVM2 will ignore devices used as components of - # software RAID (md) devices by looking for md superblocks. - # 1 enables; 0 disables. - md_component_detection = 1 - - # By default, if a PV is placed directly upon an md device, LVM2 - # will align its data blocks with the md device's stripe-width. - # 1 enables; 0 disables. - md_chunk_alignment = 1 - - # By default, the start of a PV's data area will be a multiple of - # the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs. - # - minimum_io_size - the smallest request the device can perform - # w/o incurring a read-modify-write penalty (e.g. MD's chunk size) - # - optimal_io_size - the device's preferred unit of receiving I/O - # (e.g. MD's stripe width) - # minimum_io_size is used if optimal_io_size is undefined (0). - # If md_chunk_alignment is enabled, that detects the optimal_io_size. - # This setting takes precedence over md_chunk_alignment. - # 1 enables; 0 disables. - data_alignment_detection = 1 - - # Alignment (in KB) of start of data area when creating a new PV. - # If a PV is placed directly upon an md device and md_chunk_alignment or - # data_alignment_detection is enabled this parameter is ignored. - # Set to 0 for the default alignment of 64KB or page size, if larger. 
- data_alignment = 0 - - # By default, the start of the PV's aligned data area will be shifted by - # the 'alignment_offset' exposed in sysfs. This offset is often 0 but - # may be non-zero; e.g.: certain 4KB sector drives that compensate for - # windows partitioning will have an alignment_offset of 3584 bytes - # (sector 7 is the lowest aligned logical block, the 4KB sectors start - # at LBA -1, and consequently sector 63 is aligned on a 4KB boundary). - # 1 enables; 0 disables. - data_alignment_offset_detection = 1 - - # If, while scanning the system for PVs, LVM2 encounters a device-mapper - # device that has its I/O suspended, it waits for it to become accessible. - # Set this to 1 to skip such devices. This should only be needed - # in recovery situations. - ignore_suspended_devices = 0 -} - -# This section that allows you to configure the nature of the -# information that LVM2 reports. -log { - - # Controls the messages sent to stdout or stderr. - # There are three levels of verbosity, 3 being the most verbose. - verbose = 0 - - # Should we send log messages through syslog? - # 1 is yes; 0 is no. - syslog = 1 - - # Should we log error and debug messages to a file? - # By default there is no log file. - #file = "/var/log/lvm2.log" - - # Should we overwrite the log file each time the program is run? - # By default we append. - overwrite = 0 - - # What level of log messages should we send to the log file and/or syslog? - # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive. - # 7 is the most verbose (LOG_DEBUG). - level = 0 - - # Format of output messages - # Whether or not (1 or 0) to indent messages according to their severity - indent = 1 - - # Whether or not (1 or 0) to display the command name on each line output - command_names = 0 - - # A prefix to use before the message text (but after the command name, - # if selected). Default is two spaces, so you can see/grep the severity - # of each message. - prefix = " " - - # To make the messages look similar to the original LVM tools use: - # indent = 0 - # command_names = 1 - # prefix = " -- " - - # Set this if you want log messages during activation. - # Don't use this in low memory situations (can deadlock). - # activation = 0 -} - -# Configuration of metadata backups and archiving. In LVM2 when we -# talk about a 'backup' we mean making a copy of the metadata for the -# *current* system. The 'archive' contains old metadata configurations. -# Backups are stored in a human readeable text format. -backup { - - # Should we maintain a backup of the current metadata configuration ? - # Use 1 for Yes; 0 for No. - # Think very hard before turning this off! - backup = 1 - - # Where shall we keep it ? - # Remember to back up this directory regularly! - backup_dir = "/etc/lvm/backup" - - # Should we maintain an archive of old metadata configurations. - # Use 1 for Yes; 0 for No. - # On by default. Think very hard before turning this off. - archive = 1 - - # Where should archived files go ? - # Remember to back up this directory regularly! - archive_dir = "/etc/lvm/archive" - - # What is the minimum number of archive files you wish to keep ? - retain_min = 10 - - # What is the minimum time you wish to keep an archive file for ? - retain_days = 30 -} - -# Settings for the running LVM2 in shell (readline) mode. -shell { - - # Number of lines of history to store in ~/.lvm_history - history_size = 100 -} - - -# Miscellaneous global LVM2 settings -global { - - # The file creation mask for any files and directories created. 
- # Interpreted as octal if the first digit is zero. - umask = 077 - - # Allow other users to read the files - #umask = 022 - - # Enabling test mode means that no changes to the on disk metadata - # will be made. Equivalent to having the -t option on every - # command. Defaults to off. - test = 0 - - # Default value for --units argument - units = "h" - - # Since version 2.02.54, the tools distinguish between powers of - # 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g. - # KB, MB, GB). - # If you have scripts that depend on the old behaviour, set this to 0 - # temporarily until you update them. - si_unit_consistency = 1 - - # Whether or not to communicate with the kernel device-mapper. - # Set to 0 if you want to use the tools to manipulate LVM metadata - # without activating any logical volumes. - # If the device-mapper kernel driver is not present in your kernel - # setting this to 0 should suppress the error messages. - activation = 1 - - # If we can't communicate with device-mapper, should we try running - # the LVM1 tools? - # This option only applies to 2.4 kernels and is provided to help you - # switch between device-mapper kernels and LVM1 kernels. - # The LVM1 tools need to be installed with .lvm1 suffices - # e.g. vgscan.lvm1 and they will stop working after you start using - # the new lvm2 on-disk metadata format. - # The default value is set when the tools are built. - # fallback_to_lvm1 = 0 - - # The default metadata format that commands should use - "lvm1" or "lvm2". - # The command line override is -M1 or -M2. - # Defaults to "lvm2". - # format = "lvm2" - - # Location of proc filesystem - proc = "/proc" - - # Type of locking to use. Defaults to local file-based locking (1). - # Turn locking off by setting to 0 (dangerous: risks metadata corruption - # if LVM2 commands get run concurrently). - # Type 2 uses the external shared library locking_library. - # Type 3 uses built-in clustered locking. - # Type 4 uses read-only locking which forbids any operations that might - # change metadata. - locking_type = 1 - - # Set to 0 to fail when a lock request cannot be satisfied immediately. - wait_for_locks = 1 - - # If using external locking (type 2) and initialisation fails, - # with this set to 1 an attempt will be made to use the built-in - # clustered locking. - # If you are using a customised locking_library you should set this to 0. - fallback_to_clustered_locking = 1 - - # If an attempt to initialise type 2 or type 3 locking failed, perhaps - # because cluster components such as clvmd are not running, with this set - # to 1 an attempt will be made to use local file-based locking (type 1). - # If this succeeds, only commands against local volume groups will proceed. - # Volume Groups marked as clustered will be ignored. - fallback_to_local_locking = 1 - - # Local non-LV directory that holds file-based locks while commands are - # in progress. A directory like /tmp that may get wiped on reboot is OK. - locking_dir = "/var/lock/lvm" - - # Whenever there are competing read-only and read-write access requests for - # a volume group's metadata, instead of always granting the read-only - # requests immediately, delay them to allow the read-write requests to be - # serviced. Without this setting, write access may be stalled by a high - # volume of read-only requests. - # NB. This option only affects locking_type = 1 viz. local file-based - # locking. - prioritise_write_locks = 1 - - # Other entries can go here to allow you to load shared libraries - # e.g. 
if support for LVM1 metadata was compiled as a shared library use - # format_libraries = "liblvm2format1.so" - # Full pathnames can be given. - - # Search this directory first for shared libraries. - # library_dir = "/lib/lvm2" - - # The external locking library to load if locking_type is set to 2. - # locking_library = "liblvm2clusterlock.so" -} - -activation { - # Set to 0 to disable udev syncronisation (if compiled into the binaries). - # Processes will not wait for notification from udev. - # They will continue irrespective of any possible udev processing - # in the background. You should only use this if udev is not running - # or has rules that ignore the devices LVM2 creates. - # The command line argument --nodevsync takes precedence over this setting. - # If set to 1 when udev is not running, and there are LVM2 processes - # waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up. - udev_sync = 1 - - # How to fill in missing stripes if activating an incomplete volume. - # Using "error" will make inaccessible parts of the device return - # I/O errors on access. You can instead use a device path, in which - # case, that device will be used to in place of missing stripes. - # But note that using anything other than "error" with mirrored - # or snapshotted volumes is likely to result in data corruption. - missing_stripe_filler = "error" - - # How much stack (in KB) to reserve for use while devices suspended - reserved_stack = 256 - - # How much memory (in KB) to reserve for use while devices suspended - reserved_memory = 8192 - - # Nice value used while devices suspended - process_priority = -18 - - # If volume_list is defined, each LV is only activated if there is a - # match against the list. - # "vgname" and "vgname/lvname" are matched exactly. - # "@tag" matches any tag set in the LV or VG. - # "@*" matches if any tag defined on the host is also set in the LV or VG - # - # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ] - - # Size (in KB) of each copy operation when mirroring - mirror_region_size = 512 - - # Setting to use when there is no readahead value stored in the metadata. - # - # "none" - Disable readahead. - # "auto" - Use default value chosen by kernel. - readahead = "auto" - - # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define - # how a device failure affecting a mirror is handled. - # A mirror is composed of mirror images (copies) and a log. - # A disk log ensures that a mirror does not need to be re-synced - # (all copies made the same) every time a machine reboots or crashes. - # - # In the event of a failure, the specified policy will be used to determine - # what happens. This applies to automatic repairs (when the mirror is being - # monitored by dmeventd) and to manual lvconvert --repair when - # --use-policies is given. - # - # "remove" - Simply remove the faulty device and run without it. If - # the log device fails, the mirror would convert to using - # an in-memory log. This means the mirror will not - # remember its sync status across crashes/reboots and - # the entire mirror will be re-synced. If a - # mirror image fails, the mirror will convert to a - # non-mirrored device if there is only one remaining good - # copy. - # - # "allocate" - Remove the faulty device and try to allocate space on - # a new device to be a replacement for the failed device. - # Using this policy for the log is fast and maintains the - # ability to remember sync state through crashes/reboots. 
- # Using this policy for a mirror device is slow, as it - # requires the mirror to resynchronize the devices, but it - # will preserve the mirror characteristic of the device. - # This policy acts like "remove" if no suitable device and - # space can be allocated for the replacement. - # - # "allocate_anywhere" - Not yet implemented. Useful to place the log device - # temporarily on the same physical volume as one of the mirror - # images. This policy is not recommended for mirror devices - # since it would break the redundant nature of the mirror. This - # policy acts like "remove" if no suitable device and space can - # be allocated for the replacement. - - mirror_log_fault_policy = "allocate" - mirror_device_fault_policy = "remove" -} - - -#################### -# Advanced section # -#################### - -# Metadata settings -# -# metadata { - # Default number of copies of metadata to hold on each PV. 0, 1 or 2. - # You might want to override it from the command line with 0 - # when running pvcreate on new PVs which are to be added to large VGs. - - # pvmetadatacopies = 1 - - # Approximate default size of on-disk metadata areas in sectors. - # You should increase this if you have large volume groups or - # you want to retain a large on-disk history of your metadata changes. - - # pvmetadatasize = 255 - - # List of directories holding live copies of text format metadata. - # These directories must not be on logical volumes! - # It's possible to use LVM2 with a couple of directories here, - # preferably on different (non-LV) filesystems, and with no other - # on-disk metadata (pvmetadatacopies = 0). Or this can be in - # addition to on-disk metadata areas. - # The feature was originally added to simplify testing and is not - # supported under low memory situations - the machine could lock up. - # - # Never edit any files in these directories by hand unless you - # are absolutely sure you know what you are doing! Use - # the supplied toolset to make changes (e.g. vgcfgrestore). - - # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ] -#} - -# Event daemon -# -dmeventd { - # mirror_library is the library used when monitoring a mirror device. - # - # "libdevmapper-event-lvm2mirror.so" attempts to recover from - # failures. It removes failed devices from a volume group and - # reconfigures a mirror as necessary. If no mirror library is - # provided, mirrors are not monitored through dmeventd. - - mirror_library = "libdevmapper-event-lvm2mirror.so" - - # snapshot_library is the library used when monitoring a snapshot device. - # - # "libdevmapper-event-lvm2snapshot.so" monitors the filling of - # snapshots and emits a warning through syslog, when the use of - # snapshot exceeds 80%. The warning is repeated when 85%, 90% and - # 95% of the snapshot are filled.
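These fault policies also govern manual recovery: the comments above note that lvconvert --repair follows them when --use-policies is given. A short sh sketch, with vg00/mirrorlv as placeholder names:

  # Repair a degraded mirror according to the mirror_*_fault_policy settings
  lvconvert --repair --use-policies vg00/mirrorlv
  # Check mirror health and resync progress afterwards
  lvs -a -o name,copy_percent vg00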
- - snapshot_library = "libdevmapper-event-lvm2snapshot.so" -} diff --git a/contrib/puppet/files/etc/nova.conf b/contrib/puppet/files/etc/nova.conf deleted file mode 100644 index a0d64078c..000000000 --- a/contrib/puppet/files/etc/nova.conf +++ /dev/null @@ -1,28 +0,0 @@ ---ec2_url=http://192.168.255.1:8773/services/Cloud ---rabbit_host=192.168.255.1 ---redis_host=192.168.255.1 ---s3_host=192.168.255.1 ---vpn_ip=192.168.255.1 ---datastore_path=/var/lib/nova/keeper ---networks_path=/var/lib/nova/networks ---instances_path=/var/lib/nova/instances ---buckets_path=/var/lib/nova/objectstore/buckets ---images_path=/var/lib/nova/objectstore/images ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---vlan_start=2000 ---vlan_end=3000 ---private_range=192.168.0.0/16 ---public_range=10.0.0.0/24 ---volume_group=vgdata ---storage_dev=/dev/sdc ---bridge_dev=eth2 ---aoe_eth_dev=eth2 ---public_interface=vlan0 ---default_kernel=aki-DEFAULT ---default_ramdisk=ari-DEFAULT ---vpn_image_id=ami-cloudpipe ---daemonize ---verbose ---syslog ---prefix=nova diff --git a/contrib/puppet/files/production/boto.cfg b/contrib/puppet/files/production/boto.cfg deleted file mode 100644 index f4a2de2b6..000000000 --- a/contrib/puppet/files/production/boto.cfg +++ /dev/null @@ -1,3 +0,0 @@ -[Boto] -debug = 0 -num_retries = 1 diff --git a/contrib/puppet/files/production/libvirt.qemu.xml.template b/contrib/puppet/files/production/libvirt.qemu.xml.template deleted file mode 100644 index 114dfdc01..000000000 --- a/contrib/puppet/files/production/libvirt.qemu.xml.template +++ /dev/null @@ -1,35 +0,0 @@ -<domain type='%(type)s'> - <name>%(name)s</name> - <os> - <type>hvm</type> - <kernel>%(basepath)s/kernel</kernel> - <initrd>%(basepath)s/ramdisk</initrd> - <cmdline>root=/dev/vda1 console=ttyS0</cmdline> - </os> - <features> - <acpi/> - </features> - <memory>%(memory_kb)s</memory> - <vcpu>%(vcpus)s</vcpu> - <devices> - <disk type='file'> - <source file='%(basepath)s/disk'/> - <target dev='vda' bus='virtio'/> - </disk> - <interface type='bridge'> - <source bridge='%(bridge_name)s'/> - <mac address='%(mac_address)s'/> - <!-- <model type='virtio'/> CANT RUN virtio network right now --> - <!-- - <filterref filter="nova-instance-%(name)s"> - <parameter name="IP" value="%(ip_address)s" /> - <parameter name="DHCPSERVER" value="%(dhcp_server)s" /> - </filterref> - --> - </interface> - <serial type="file"> - <source path='%(basepath)s/console.log'/> - <target port='1'/> - </serial> - </devices> -</domain> diff --git a/contrib/puppet/files/production/my.cnf b/contrib/puppet/files/production/my.cnf deleted file mode 100644 index 8777bc480..000000000 --- a/contrib/puppet/files/production/my.cnf +++ /dev/null @@ -1,137 +0,0 @@ -# -# The MySQL database server configuration file. -# -# You can copy this to one of: -# - "/etc/mysql/my.cnf" to set global options, -# - "~/.my.cnf" to set user-specific options. -# -# One can use all long options that the program supports. -# Run program with --help to get a list of available options and with -# --print-defaults to see which it would actually understand and use. -# -# For explanations see -# http://dev.mysql.com/doc/mysql/en/server-system-variables.html - -# This will be passed to all mysql clients -# It has been reported that passwords should be enclosed with ticks/quotes -# escpecially if they contain "#" chars... -# Remember to edit /etc/mysql/debian.cnf when changing the socket location. 
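For the [mysqld] tuning that follows (a 12G InnoDB buffer pool, O_DIRECT flushing), a quick sh sketch to confirm a running server actually picked the values up; it assumes client credentials are already available, for example via a root ~/.my.cnf:

  mysql -e "SHOW VARIABLES LIKE 'innodb_buffer_pool_size';"
  mysql -e "SHOW VARIABLES LIKE 'innodb_flush_method';"
  # A steadily climbing counter here hints the pool is still too small:
  mysql -e "SHOW GLOBAL STATUS LIKE 'Innodb_buffer_pool_wait_free';"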
-[client] -port = 3306 -socket = /var/run/mysqld/mysqld.sock - -# Here are entries for some specific programs -# The following values assume you have at least 32M ram - -# This was formerly known as [safe_mysqld]. Both versions are currently parsed. -[mysqld_safe] -socket = /var/run/mysqld/mysqld.sock -nice = 0 - -[mysqld] -# -# * Basic Settings -# - -# -# * IMPORTANT -# If you make changes to these settings and your system uses apparmor, you may -# also need to adjust /etc/apparmor.d/usr.sbin.mysqld. -# - -user = mysql -socket = /var/run/mysqld/mysqld.sock -port = 3306 -basedir = /usr -datadir = /var/lib/mysql -tmpdir = /tmp -skip-external-locking -# -# Instead of skip-networking the default is now to listen only on -# localhost which is more compatible and is not less secure. -# bind-address = 127.0.0.1 -# -# * Fine Tuning -# -innodb_buffer_pool_size = 12G -#innodb_log_file_size = 256M -innodb_log_buffer_size=4M -innodb_flush_log_at_trx_commit=2 -innodb_thread_concurrency=8 -innodb_flush_method=O_DIRECT -key_buffer = 128M -max_allowed_packet = 256M -thread_stack = 8196K -thread_cache_size = 32 -# This replaces the startup script and checks MyISAM tables if needed -# the first time they are touched -myisam-recover = BACKUP -max_connections = 1000 -table_cache = 1024 -#thread_concurrency = 10 -# -# * Query Cache Configuration -# -query_cache_limit = 32M -query_cache_size = 256M -# -# * Logging and Replication -# -# Both locations get rotated by the cronjob. -# Be aware that this log type is a performance killer. -# As of 5.1 you can enable the log at runtime! -#general_log_file = /var/log/mysql/mysql.log -#general_log = 1 - -log_error = /var/log/mysql/error.log - -# Here you can see queries with especially long duration -log_slow_queries = /var/log/mysql/mysql-slow.log -long_query_time = 2 -#log-queries-not-using-indexes -# -# The following can be used as easy to replay backup logs or for replication. -# note: if you are setting up a replication slave, see README.Debian about -# other settings you may need to change. -server-id = 1 -log_bin = /var/log/mysql/mysql-bin.log -expire_logs_days = 10 -max_binlog_size = 50M -#binlog_do_db = include_database_name -#binlog_ignore_db = include_database_name -# -# * InnoDB -# -sync_binlog=1 -# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. -# Read the manual for more InnoDB related options. There are many! -# -# * Security Features -# -# Read the manual, too, if you want chroot! -# chroot = /var/lib/mysql/ -# -# For generating SSL certificates I recommend the OpenSSL GUI "tinyca". -# -# ssl-ca=/etc/mysql/cacert.pem -# ssl-cert=/etc/mysql/server-cert.pem -# ssl-key=/etc/mysql/server-key.pem - - - -[mysqldump] -quick -quote-names -max_allowed_packet = 256M - -[mysql] -#no-auto-rehash # faster start of mysql but no tab completion - -[isamchk] -key_buffer = 128M - -# -# * IMPORTANT: Additional settings that can override those from this file! -# The files must end with '.cnf', otherwise they'll be ignored. -# -!includedir /etc/mysql/conf.d/ diff --git a/contrib/puppet/files/production/nova-iptables b/contrib/puppet/files/production/nova-iptables deleted file mode 100755 index 61e2ca2b9..000000000 --- a/contrib/puppet/files/production/nova-iptables +++ /dev/null @@ -1,187 +0,0 @@ -#! /bin/sh - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(vish): This script sets up some reasonable defaults for iptables and -# creates nova-specific chains. If you use this script you should -# run nova-network and nova-compute with --use_nova_chains=True - - -# NOTE(vish): If you run public nova-api on a different port, make sure to -# change the port here - -if [ -f /etc/default/nova-iptables ] ; then - . /etc/default/nova-iptables -fi - -export LC_ALL=C - -API_PORT=${API_PORT:-"8773"} - -if [ ! -n "$IP" ]; then - # NOTE(vish): IP address is what address the services ALLOW on. - # This will just get the first ip in the list, so if you - # have more than one eth device set up, this will fail, and - # you should explicitly pass in the ip of the instance - IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` -fi - -if [ ! -n "$PRIVATE_RANGE" ]; then - #NOTE(vish): PRIVATE_RANGE: range is ALLOW to access DHCP - PRIVATE_RANGE="192.168.0.0/12" -fi - -if [ ! -n "$MGMT_IP" ]; then - # NOTE(vish): Management IP is the ip over which to allow ssh traffic. It - # will also allow traffic to nova-api - MGMT_IP="$IP" -fi - -if [ ! -n "$DMZ_IP" ]; then - # NOTE(vish): DMZ IP is the ip over which to allow api & objectstore access - DMZ_IP="$IP" -fi - -clear_nova_iptables() { - iptables -P INPUT ACCEPT - iptables -P FORWARD ACCEPT - iptables -P OUTPUT ACCEPT - iptables -F - iptables -t nat -F - iptables -F services - iptables -X services - # HACK: re-adding fail2ban rules :( - iptables -N fail2ban-ssh - iptables -A INPUT -p tcp -m multiport --dports 22 -j fail2ban-ssh - iptables -A fail2ban-ssh -j RETURN -} - -load_nova_iptables() { - - iptables -P INPUT DROP - iptables -A INPUT -m state --state INVALID -j DROP - iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT - # NOTE(ja): allow localhost for everything - iptables -A INPUT -d 127.0.0.1/32 -j ACCEPT - # NOTE(ja): 22 only allowed MGMT_IP before, but we widened it to any - # address, since ssh should be listening only on internal - # before we re-add this rule we will need to add - # flexibility for RSYNC between omega/stingray - iptables -A INPUT -m tcp -p tcp --dport 22 -j ACCEPT - iptables -A INPUT -m udp -p udp --dport 123 -j ACCEPT - iptables -A INPUT -p icmp -j ACCEPT - iptables -N services - iptables -A INPUT -j services - iptables -A INPUT -p tcp -j REJECT --reject-with tcp-reset - iptables -A INPUT -j REJECT --reject-with icmp-port-unreachable - - iptables -P FORWARD DROP - iptables -A FORWARD -m state --state INVALID -j DROP - iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT - iptables -A FORWARD -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu - - # NOTE(vish): DROP on output is too restrictive for now. We need to add - # in a bunch of more specific output rules to use it. 
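# A usage sketch for this script, with illustrative values: the service
# switches (API, MYSQL, ...) and addresses come from /etc/default/nova-iptables,
# which the nova-iptables.erb template later in this patch writes out.
cat > /etc/default/nova-iptables <<'EOF'
API=1
MYSQL=1
IP="192.168.0.10"
EOF
/etc/init.d/nova-iptables start   # loads the rules and builds the 'services' chain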
- # iptables -P OUTPUT DROP - iptables -A OUTPUT -m state --state INVALID -j DROP - iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT - - if [ -n "$GANGLIA" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 8649 -j ACCEPT - iptables -A services -m udp -p udp -d $IP --dport 8649 -j ACCEPT - fi - - # if [ -n "$WEB" ] || [ -n "$ALL" ]; then - # # NOTE(vish): This opens up ports for web access, allowing web-based - # # dashboards to work. - # iptables -A services -m tcp -p tcp -d $IP --dport 80 -j ACCEPT - # iptables -A services -m tcp -p tcp -d $IP --dport 443 -j ACCEPT - # fi - - if [ -n "$OBJECTSTORE" ] || [ -n "$ALL" ]; then - # infrastructure - iptables -A services -m tcp -p tcp -d $IP --dport 3333 -j ACCEPT - # clients - iptables -A services -m tcp -p tcp -d $DMZ_IP --dport 3333 -j ACCEPT - fi - - if [ -n "$API" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport $API_PORT -j ACCEPT - if [ "$IP" != "$DMZ_IP" ]; then - iptables -A services -m tcp -p tcp -d $DMZ_IP --dport $API_PORT -j ACCEPT - fi - if [ "$IP" != "$MGMT_IP" ] && [ "$DMZ_IP" != "$MGMT_IP" ]; then - iptables -A services -m tcp -p tcp -d $MGMT_IP --dport $API_PORT -j ACCEPT - fi - fi - - if [ -n "$REDIS" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 6379 -j ACCEPT - fi - - if [ -n "$MYSQL" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 3306 -j ACCEPT - fi - - if [ -n "$RABBITMQ" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 4369 -j ACCEPT - iptables -A services -m tcp -p tcp -d $IP --dport 5672 -j ACCEPT - iptables -A services -m tcp -p tcp -d $IP --dport 53284 -j ACCEPT - fi - - if [ -n "$DNSMASQ" ] || [ -n "$ALL" ]; then - # NOTE(vish): this could theoretically be setup per network - # for each host, but it seems like overkill - iptables -A services -m tcp -p tcp -s $PRIVATE_RANGE --dport 53 -j ACCEPT - iptables -A services -m udp -p udp -s $PRIVATE_RANGE --dport 53 -j ACCEPT - iptables -A services -m udp -p udp --dport 67 -j ACCEPT - fi - - if [ -n "$LDAP" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 389 -j ACCEPT - fi - - if [ -n "$ISCSI" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 3260 -j ACCEPT - iptables -A services -m tcp -p tcp -d 127.0.0.0/16 --dport 3260 -j ACCEPT - fi -} - - -case "$1" in - start) - echo "Starting nova-iptables: " - load_nova_iptables - ;; - stop) - echo "Clearing nova-iptables: " - clear_nova_iptables - ;; - restart) - echo "Restarting nova-iptables: " - clear_nova_iptables - load_nova_iptables - ;; - *) - echo "Usage: $NAME {start|stop|restart}" >&2 - exit 1 - ;; -esac - -exit 0 diff --git a/contrib/puppet/files/production/nova-iscsi-dev.sh b/contrib/puppet/files/production/nova-iscsi-dev.sh deleted file mode 100644 index 8eda10d2e..000000000 --- a/contrib/puppet/files/production/nova-iscsi-dev.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -# FILE: /etc/udev/scripts/iscsidev.sh - -BUS=${1} -HOST=${BUS%%:*} - -[ -e /sys/class/iscsi_host ] || exit 1 - -file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname" - -target_name=$(cat ${file}) - -# This is not an open-scsi drive -if [ -z "${target_name}" ]; then - exit 1 -fi - -echo "${target_name##*:}" diff --git a/contrib/puppet/files/production/setup_data.sh b/contrib/puppet/files/production/setup_data.sh deleted file mode 100755 index 1fbbac41c..000000000 --- 
a/contrib/puppet/files/production/setup_data.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -/root/slap.sh -mysql -e "DROP DATABASE nova" -mysql -e "CREATE DATABASE nova" -mysql -e "GRANT ALL on nova.* to nova@'%' identified by 'TODO:CHANGEME:CMON'" -touch /root/installed diff --git a/contrib/puppet/files/production/slap.sh b/contrib/puppet/files/production/slap.sh deleted file mode 100755 index f8ea16949..000000000 --- a/contrib/puppet/files/production/slap.sh +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env bash -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS - -apt-get install -y slapd ldap-utils python-ldap - -cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE <eau@phear.org> -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) -LPK_SCHEMA_EOF - -cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF -# -# Person object for Nova -# inetorgperson with extra attributes -# Author: Vishvananda Ishaya <vishvananda@yahoo.com> -# -# - -# using internet experimental oid arc as per BP64 3.1 -objectidentifier novaSchema 1.3.6.1.3.1.666.666 -objectidentifier novaAttrs novaSchema:3 -objectidentifier novaOCs novaSchema:4 - -attributetype ( - novaAttrs:1 - NAME 'accessKey' - DESC 'Key for accessing data' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:2 - NAME 'secretKey' - DESC 'Secret key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' 
- EQUALITY booleanMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - -objectClass ( - novaOCs:1 - NAME 'novaUser' - DESC 'access and secret keys' - AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) - ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) - -NOVA_SCHEMA_EOF - -mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig -cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF -# slapd.conf - Configuration file for LDAP SLAPD -########## -# Basics # -########## -include /etc/ldap/schema/core.schema -include /etc/ldap/schema/cosine.schema -include /etc/ldap/schema/inetorgperson.schema -include /etc/ldap/schema/openssh-lpk_openldap.schema -include /etc/ldap/schema/nova.schema -pidfile /var/run/slapd/slapd.pid -argsfile /var/run/slapd/slapd.args -loglevel none -modulepath /usr/lib/ldap -# modulepath /usr/local/libexec/openldap -moduleload back_hdb -########################## -# Database Configuration # -########################## -database hdb -suffix "dc=example,dc=com" -rootdn "cn=Manager,dc=example,dc=com" -rootpw changeme -directory /var/lib/ldap -# directory /usr/local/var/openldap-data -index objectClass,cn eq -######## -# ACLs # -######## -access to attrs=userPassword - by anonymous auth - by self write - by * none -access to * - by self write - by * none -SLAPD_CONF_EOF - -mv /etc/ldap/ldap.conf /etc/ldap/ldap.conf.orig - -cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF -# LDAP Client Settings -URI ldap://localhost -BASE dc=example,dc=com -BINDDN cn=Manager,dc=example,dc=com -SIZELIMIT 0 -TIMELIMIT 0 -LDAP_CONF_EOF - -cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF -# This is the root of the directory tree -dn: dc=example,dc=com -description: Example.Com, your trusted non-existent corporation. -dc: example -o: Example.Com -objectClass: top -objectClass: dcObject -objectClass: organization - -# Subtree for users -dn: ou=Users,dc=example,dc=com -ou: Users -description: Users -objectClass: organizationalUnit - -# Subtree for groups -dn: ou=Groups,dc=example,dc=com -ou: Groups -description: Groups -objectClass: organizationalUnit - -# Subtree for system accounts -dn: ou=System,dc=example,dc=com -ou: System -description: Special accounts used by software applications. 
-objectClass: organizationalUnit - -# Special Account for Authentication: -dn: uid=authenticate,ou=System,dc=example,dc=com -uid: authenticate -ou: System -description: Special account for authenticating users -userPassword: {MD5}TODO-000000000000000000000000000== -objectClass: account -objectClass: simpleSecurityObject - -# create the sysadmin entry - -dn: cn=developers,ou=Groups,dc=example,dc=com -objectclass: groupOfNames -cn: developers -description: IT admin group -member: uid=admin,ou=Users,dc=example,dc=com - -dn: cn=sysadmins,ou=Groups,dc=example,dc=com -objectclass: groupOfNames -cn: sysadmins -description: IT admin group -member: uid=admin,ou=Users,dc=example,dc=com - -dn: cn=netadmins,ou=Groups,dc=example,dc=com -objectclass: groupOfNames -cn: netadmins -description: Network admin group -member: uid=admin,ou=Users,dc=example,dc=com - -dn: cn=cloudadmins,ou=Groups,dc=example,dc=com -objectclass: groupOfNames -cn: cloudadmins -description: Cloud admin group -member: uid=admin,ou=Users,dc=example,dc=com - -dn: cn=itsec,ou=Groups,dc=example,dc=com -objectclass: groupOfNames -cn: itsec -description: IT security users group -member: uid=admin,ou=Users,dc=example,dc=com -BASE_LDIF_EOF - -/etc/init.d/slapd stop -rm -rf /var/lib/ldap/* -rm -rf /etc/ldap/slapd.d/* -slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d -cp /usr/share/slapd/DB_CONFIG /var/lib/ldap/DB_CONFIG -slapadd -v -l /etc/ldap/base.ldif -chown -R openldap:openldap /etc/ldap/slapd.d -chown -R openldap:openldap /var/lib/ldap -/etc/init.d/slapd start diff --git a/contrib/puppet/fileserver.conf b/contrib/puppet/fileserver.conf deleted file mode 100644 index 6e2984b8c..000000000 --- a/contrib/puppet/fileserver.conf +++ /dev/null @@ -1,8 +0,0 @@ -# fileserver.conf - -[files] -path /srv/cloud/puppet/files -allow 10.0.0.0/24 - -[plugins] - diff --git a/contrib/puppet/manifests/classes/apt.pp b/contrib/puppet/manifests/classes/apt.pp deleted file mode 100644 index 03022aeef..000000000 --- a/contrib/puppet/manifests/classes/apt.pp +++ /dev/null @@ -1 +0,0 @@ -exec { "update-apt": command => "/usr/bin/apt-get update" } diff --git a/contrib/puppet/manifests/classes/issue.pp b/contrib/puppet/manifests/classes/issue.pp deleted file mode 100644 index 8bb37ee3f..000000000 --- a/contrib/puppet/manifests/classes/issue.pp +++ /dev/null @@ -1,14 +0,0 @@ -class issue { - file { "/etc/issue": - owner => "root", - group => "root", - mode => 444, - source => "puppet://${puppet_server}/files/etc/issue", - } - file { "/etc/issue.net": - owner => "root", - group => "root", - mode => 444, - source => "puppet://${puppet_server}/files/etc/issue", - } -} diff --git a/contrib/puppet/manifests/classes/kern_module.pp b/contrib/puppet/manifests/classes/kern_module.pp deleted file mode 100644 index 00ec0636c..000000000 --- a/contrib/puppet/manifests/classes/kern_module.pp +++ /dev/null @@ -1,34 +0,0 @@ -# via http://projects.puppetlabs.com/projects/puppet/wiki/Kernel_Modules_Patterns - -define kern_module ($ensure) { - $modulesfile = $operatingsystem ? { ubuntu => "/etc/modules", redhat => "/etc/rc.modules" } - case $operatingsystem { - redhat: { file { "/etc/rc.modules": ensure => file, mode => 755 } } - } - case $ensure { - present: { - exec { "insert_module_${name}": - command => $operatingsystem ? 
{ - ubuntu => "/bin/echo '${name}' >> '${modulesfile}'", - redhat => "/bin/echo '/sbin/modprobe ${name}' >> '${modulesfile}' " - }, - unless => "/bin/grep -qFx '${name}' '${modulesfile}'" - } - exec { "/sbin/modprobe ${name}": unless => "/bin/grep -q '^${name} ' '/proc/modules'" } - } - absent: { - exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" } - exec { "remove_module_${name}": - command => $operatingsystem ? { - ubuntu => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'", - redhat => "/usr/bin/perl -ni -e 'print unless /^\\Q/sbin/modprobe ${name}\\E\$/' '${modulesfile}'" - }, - onlyif => $operatingsystem ? { - ubuntu => "/bin/grep -qFx '${name}' '${modulesfile}'", - redhat => "/bin/grep -q '^/sbin/modprobe ${name}' '${modulesfile}'" - } - } - } - default: { err ( "unknown ensure value ${ensure}" ) } - } -} diff --git a/contrib/puppet/manifests/classes/loopback.pp b/contrib/puppet/manifests/classes/loopback.pp deleted file mode 100644 index e0fa9d541..000000000 --- a/contrib/puppet/manifests/classes/loopback.pp +++ /dev/null @@ -1,6 +0,0 @@ -define loopback($num) { - exec { "mknod -m 0660 /dev/loop${num} b 7 ${num}; chown root:disk /dev/loop${num}": - creates => "/dev/loop${num}", - path => ["/usr/bin", "/usr/sbin", "/bin"] - } -} diff --git a/contrib/puppet/manifests/classes/lvm.pp b/contrib/puppet/manifests/classes/lvm.pp deleted file mode 100644 index 5a407abcb..000000000 --- a/contrib/puppet/manifests/classes/lvm.pp +++ /dev/null @@ -1,8 +0,0 @@ -class lvm { - file { "/etc/lvm/lvm.conf": - owner => "root", - group => "root", - mode => 444, - source => "puppet://${puppet_server}/files/etc/lvm.conf", - } -} diff --git a/contrib/puppet/manifests/classes/lvmconf.pp b/contrib/puppet/manifests/classes/lvmconf.pp deleted file mode 100644 index 4aa7ddfdc..000000000 --- a/contrib/puppet/manifests/classes/lvmconf.pp +++ /dev/null @@ -1,8 +0,0 @@ -class lvmconf { - file { "/etc/lvm/lvm.conf": - owner => "root", group => "root", mode => 644, - source => "puppet://${puppet_server}/files/etc/lvm/lvm.conf", - ensure => present - } -} - diff --git a/contrib/puppet/manifests/classes/nova.pp b/contrib/puppet/manifests/classes/nova.pp deleted file mode 100644 index e942860f4..000000000 --- a/contrib/puppet/manifests/classes/nova.pp +++ /dev/null @@ -1,464 +0,0 @@ -import "kern_module" -import "apt" -import "loopback" - -#$head_node_ip = "undef" -#$rabbit_ip = "undef" -#$vpn_ip = "undef" -#$public_interface = "undef" -#$vlan_start = "5000" -#$vlan_end = "6000" -#$private_range = "10.0.0.0/16" -#$public_range = "192.168.177.0/24" - -define nova_iptables($services, $ip="", $private_range="", $mgmt_ip="", $dmz_ip="") { - file { "/etc/init.d/nova-iptables": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/nova-iptables", - } - - file { "/etc/default/nova-iptables": - owner => "root", mode => 644, - content => template("nova-iptables.erb") - } -} - -define nova_conf_pointer($name) { - file { "/etc/nova/nova-${name}.conf": - owner => "nova", mode => 400, - content => "--flagfile=/etc/nova/nova.conf" - } -} - -class novaconf { - file { "/etc/nova/nova.conf": - owner => "nova", mode => 400, - content => template("production/nova-common.conf.erb", "production/nova-${cluster_name}.conf.erb") - } - nova_conf_pointer{'manage': name => 'manage'} -} - -class novadata { - package { "rabbitmq-server": ensure => present } - - file { "/etc/rabbitmq/rabbitmq.conf": - owner => "root", mode => 644, - content => 
"NODENAME=rabbit@localhost", - } - - service { "rabbitmq-server": - ensure => running, - enable => true, - hasstatus => true, - require => [ - File["/etc/rabbitmq/rabbitmq.conf"], - Package["rabbitmq-server"] - ] - } - - package { "mysql-server": ensure => present } - - file { "/etc/mysql/my.cnf": - owner => "root", mode => 644, - source => "puppet://${puppet_server}/files/production/my.cnf", - } - - service { "mysql": - ensure => running, - enable => true, - hasstatus => true, - require => [ - File["/etc/mysql/my.cnf"], - Package["mysql-server"] - ] - } - - file { "/root/slap.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/slap.sh", - } - - file { "/root/setup_data.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/setup_data.sh", - } - - # setup compute data - exec { "setup_data": - command => "/root/setup_data.sh", - path => "/usr/bin:/bin", - unless => "test -f /root/installed", - require => [ - Service["mysql"], - File["/root/slap.sh"], - File["/root/setup_data.sh"] - ] - } -} - -define nscheduler($version) { - package { "nova-scheduler": ensure => $version, require => Exec["update-apt"] } - nova_conf_pointer{'scheduler': name => 'scheduler'} - exec { "update-rc.d -f nova-scheduler remove; update-rc.d nova-scheduler defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-scheduler", - unless => "test -f /etc/rc2.d/S50nova-scheduler" - } - service { "nova-scheduler": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-scheduler"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-scheduler.conf"] - ] - } - -} - -define napi($version, $api_servers, $api_base_port) { - file { "/etc/boto.cfg": - owner => "root", mode => 644, - source => "puppet://${puppet_server}/files/production/boto.cfg", - } - - file { "/var/lib/nova/CA/genvpn.sh": - owner => "nova", mode => 755, - source => "puppet://${puppet_server}/files/production/genvpn.sh", - } - - package { "python-greenlet": ensure => present } - package { "nova-api": ensure => $version, require => [Exec["update-apt"], Package["python-greenlet"]] } - nova_conf_pointer{'api': name => 'api'} - - exec { "update-rc.d -f nova-api remove; update-rc.d nova-api defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-api", - unless => "test -f /etc/rc2.d/S50nova-api" - } - - service { "nova-netsync": - start => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock start", - stop => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock stop", - ensure => running, - hasstatus => false, - pattern => "nova-netsync", - require => Service["nova-api"], - subscribe => File["/etc/nova/nova.conf"] - } - service { "nova-api": - start => "monit start all -g nova_api", - stop => "monit stop all -g nova_api", - restart => "monit restart all -g nova_api", - # ensure => running, - # hasstatus => true, - require => Service["monit"], - subscribe => [ - Package["nova-objectstore"], - File["/etc/boto.cfg"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-objectstore.conf"] - ] - } - - # the haproxy & monit's template use $api_servers and $api_base_port - - package { "haproxy": ensure => present } - file { "/etc/default/haproxy": - owner => "root", mode => 644, - content => "ENABLED=1", - require => Package['haproxy'] - } - file { "/etc/haproxy/haproxy.cfg": - owner => "root", 
mode => 644, - content => template("/srv/cloud/puppet/templates/haproxy.cfg.erb"), - require => Package['haproxy'] - } - service { "haproxy": - ensure => true, - enable => true, - hasstatus => true, - subscribe => [ - Package["haproxy"], - File["/etc/default/haproxy"], - File["/etc/haproxy/haproxy.cfg"], - ] - } - - package { "socat": ensure => present } - - file { "/usr/local/bin/gmetric_haproxy.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/ganglia/gmetric_scripts/gmetric_haproxy.sh", - } - - cron { "gmetric_haproxy": - command => "/usr/local/bin/gmetric_haproxy.sh", - user => root, - minute => "*/3", - } - - package { "monit": ensure => present } - - file { "/etc/default/monit": - owner => "root", mode => 644, - content => "startup=1", - require => Package['monit'] - } - file { "/etc/monit/monitrc": - owner => "root", mode => 600, - content => template("/srv/cloud/puppet/templates/monitrc-nova-api.erb"), - require => Package['monit'] - } - service { "monit": - ensure => true, - pattern => "sbin/monit", - subscribe => [ - Package["monit"], - File["/etc/default/monit"], - File["/etc/monit/monitrc"], - ] - } - -} - - -define nnetwork($version) { - # kill the default network added by the package - exec { "kill-libvirt-default-net": - command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml", - path => "/usr/bin:/bin", - onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml" - } - - # EVIL HACK: custom binary because dnsmasq 2.52 segfaulted accessing dereferenced object - file { "/usr/sbin/dnsmasq": - owner => "root", group => "root", - source => "puppet://${puppet_server}/files/production/dnsmasq", - } - - package { "nova-network": ensure => $version, require => Exec["update-apt"] } - nova_conf_pointer{'dhcpbridge': name => 'dhcpbridge'} - nova_conf_pointer{'network': name => "network" } - - exec { "update-rc.d -f nova-network remove; update-rc.d nova-network defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-network", - unless => "test -f /etc/rc2.d/S50nova-network" - } - service { "nova-network": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-network"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-network.conf"] - ] - } -} - -define nobjectstore($version) { - package { "nova-objectstore": ensure => $version, require => Exec["update-apt"] } - nova_conf_pointer{'objectstore': name => 'objectstore'} - exec { "update-rc.d -f nova-objectstore remove; update-rc.d nova-objectstore defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-objectstore", - unless => "test -f /etc/rc2.d/S50nova-objectstore" - } - service { "nova-objectstore": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-objectstore"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-objectstore.conf"] - ] - } -} - -define ncompute($version) { - include ganglia-python - include ganglia-compute - - # kill the default network added by the package - exec { "kill-libvirt-default-net": - command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml", - path => "/usr/bin:/bin", - onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml" - } - - - # LIBVIRT has to be restarted when ebtables / gawk is installed - service { "libvirt-bin": - ensure => running, - pattern => "sbin/libvirtd", - subscribe => [ - Package["ebtables"], - Kern_module["kvm_intel"] - ], - require => 
[ - Package["libvirt-bin"], - Package["ebtables"], - Package["gawk"], - Kern_module["kvm_intel"], - File["/dev/kvm"] - ] - } - - package { "libvirt-bin": ensure => "0.8.3-1ubuntu14~ppalucid2" } - package { "ebtables": ensure => present } - package { "gawk": ensure => present } - - # ensure proper permissions on /dev/kvm - file { "/dev/kvm": - owner => "root", - group => "kvm", - mode => 660 - } - - # require hardware virt - kern_module { "kvm_intel": - ensure => present, - } - - # increase loopback devices - file { "/etc/modprobe.d/loop.conf": - owner => "root", mode => 644, - content => "options loop max_loop=40" - } - - nova_conf_pointer{'compute': name => 'compute'} - - loopback{loop0: num => 0} - loopback{loop1: num => 1} - loopback{loop2: num => 2} - loopback{loop3: num => 3} - loopback{loop4: num => 4} - loopback{loop5: num => 5} - loopback{loop6: num => 6} - loopback{loop7: num => 7} - loopback{loop8: num => 8} - loopback{loop9: num => 9} - loopback{loop10: num => 10} - loopback{loop11: num => 11} - loopback{loop12: num => 12} - loopback{loop13: num => 13} - loopback{loop14: num => 14} - loopback{loop15: num => 15} - loopback{loop16: num => 16} - loopback{loop17: num => 17} - loopback{loop18: num => 18} - loopback{loop19: num => 19} - loopback{loop20: num => 20} - loopback{loop21: num => 21} - loopback{loop22: num => 22} - loopback{loop23: num => 23} - loopback{loop24: num => 24} - loopback{loop25: num => 25} - loopback{loop26: num => 26} - loopback{loop27: num => 27} - loopback{loop28: num => 28} - loopback{loop29: num => 29} - loopback{loop30: num => 30} - loopback{loop31: num => 31} - loopback{loop32: num => 32} - loopback{loop33: num => 33} - loopback{loop34: num => 34} - loopback{loop35: num => 35} - loopback{loop36: num => 36} - loopback{loop37: num => 37} - loopback{loop38: num => 38} - loopback{loop39: num => 39} - - package { "python-libvirt": ensure => "0.8.3-1ubuntu14~ppalucid2" } - - package { "nova-compute": - ensure => "$version", - require => Package["python-libvirt"] - } - - #file { "/usr/share/nova/libvirt.qemu.xml.template": - # owner => "nova", mode => 400, - # source => "puppet://${puppet_server}/files/production/libvirt.qemu.xml.template", - #} - - # fix runlevels: using enable => true adds it as 20, which is too early - exec { "update-rc.d -f nova-compute remove": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/rc2.d/S??nova-compute" - } - service { "nova-compute": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-compute"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-compute.conf"], - #File["/usr/share/nova/libvirt.qemu.xml.template"], - Service["libvirt-bin"], - Kern_module["kvm_intel"] - ] - } -} - -define nvolume($version) { - - package { "nova-volume": ensure => $version, require => Exec["update-apt"] } - - nova_conf_pointer{'volume': name => 'volume'} - - # fix runlevels: using enable => true adds it as 20, which is too early - exec { "update-rc.d -f nova-volume remove": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/rc2.d/S??nova-volume" - } - - file { "/etc/default/iscsitarget": - owner => "root", mode => 644, - content => "ISCSITARGET_ENABLE=true" - } - - package { "iscsitarget": ensure => present } - - file { "/dev/iscsi": ensure => directory } # FIXME(vish): owner / mode? 
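The loopback define earlier in this patch creates each device with mknod, and ncompute raises max_loop to 40 to match its forty loopbackN declarations; a plain sh loop gives the same effect (a sketch, not the manifest's own mechanism):

  for num in $(seq 0 39); do
    # block device, loop major 7, minor $num, just as the Puppet exec does
    [ -e "/dev/loop${num}" ] || mknod -m 0660 "/dev/loop${num}" b 7 "${num}"
    chown root:disk "/dev/loop${num}"
  done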
- file { "/usr/sbin/nova-iscsi-dev.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/nova-iscsi-dev.sh" - } - file { "/etc/udev/rules.d/55-openiscsi.rules": - owner => "root", mode => 644, - content => 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/usr/sbin/nova-iscsi-dev.sh %b",SYMLINK+="iscsi/%c%n"' - } - - service { "iscsitarget": - ensure => running, - enable => true, - hasstatus => true, - require => [ - File["/etc/default/iscsitarget"], - Package["iscsitarget"] - ] - } - - service { "nova-volume": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-volume"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-volume.conf"] - ] - } -} - -class novaspool { - # This isn't in release yet - #cron { logspool: - # command => "/usr/bin/nova-logspool /var/log/nova.log /var/lib/nova/spool", - # user => "nova" - #} - #cron { spoolsentry: - # command => "/usr/bin/nova-spoolsentry ${sentry_url} ${sentry_key} /var/lib/nova/spool", - # user => "nova" - #} -} diff --git a/contrib/puppet/manifests/classes/swift.pp b/contrib/puppet/manifests/classes/swift.pp deleted file mode 100644 index 64ffb6fa3..000000000 --- a/contrib/puppet/manifests/classes/swift.pp +++ /dev/null @@ -1,7 +0,0 @@ -class swift { - package { "memcached": ensure => present } - service { "memcached": require => Package['memcached'] } - - package { "swift-proxy": ensure => present } -} - diff --git a/contrib/puppet/manifests/site.pp b/contrib/puppet/manifests/site.pp deleted file mode 100644 index ca07a34ad..000000000 --- a/contrib/puppet/manifests/site.pp +++ /dev/null @@ -1,120 +0,0 @@ -# site.pp - -import "templates" -import "classes/*" - -node novabase inherits default { -# $puppet_server = "192.168.0.10" - $cluster_name = "openstack001" - $ganglia_udp_send_channel = "openstack001.example.com" - $syslog = "192.168.0.10" - - # THIS STUFF ISN'T IN RELEASE YET - #$sentry_url = "http://192.168.0.19/sentry/store/" - #$sentry_key = "TODO:SENTRYPASS" - - $local_network = "192.168.0.0/16" - $vpn_ip = "192.168.0.2" - $public_interface = "eth0" - include novanode -# include nova-common - include opsmetrics - -# non-nova stuff such as nova-dash inherit from novanode -# novaspool needs a better home -# include novaspool -} - -# Builder -node "nova000.example.com" inherits novabase { - $syslog = "server" - include ntp - include syslog-server -} - -# Non-Nova nodes - -node - "blog.example.com", - "wiki.example.com" -inherits novabase { - include ganglia-python - include ganglia-apache - include ganglia-mysql -} - - -node "nova001.example.com" -inherits novabase { - include novabase - - nova_iptables { nova: - services => [ - "ganglia", - "mysql", - "rabbitmq", - "ldap", - "api", - "objectstore", - "nrpe", - ], - ip => "192.168.0.10", - } - - nobjectstore { nova: version => "0.9.0" } - nscheduler { nova: version => "0.9.0" } - napi { nova: - version => "0.9.0", - api_servers => 10, - api_base_port => 8000 - } -} - -node "nova002.example.com" -inherits novabase { - include novaconf - - nova_iptables { nova: - services => [ - "ganglia", - "dnsmasq", - "nrpe" - ], - ip => "192.168.4.2", - private_range => "192.168.0.0/16", - } - - nnetwork { nova: version => "0.9.0" } -} - -node - "nova003.example.com", - "nova004.example.com", - "nova005.example.com", - "nova006.example.com", - "nova007.example.com", - "nova008.example.com", - "nova009.example.com", - "nova010.example.com", - "nova011.example.com", - "nova012.example.com", - "nova013.example.com", - "nova014.example.com", - 
"nova015.example.com", - "nova016.example.com", - "nova017.example.com", - "nova018.example.com", - "nova019.example.com", -inherits novabase { - include novaconf - ncompute { nova: version => "0.9.0" } - nvolume { nova: version => "0.9.0" } -} - -#node -# "nova020.example.com" -# "nova021.example.com" -#inherits novanode { -# include novaconf - #ncompute { nova: version => "0.9.0" } -#} diff --git a/contrib/puppet/manifests/templates.pp b/contrib/puppet/manifests/templates.pp deleted file mode 100644 index 90e433013..000000000 --- a/contrib/puppet/manifests/templates.pp +++ /dev/null @@ -1,21 +0,0 @@ -# templates.pp - -import "classes/*" - -class baseclass { -# include dns-client # FIXME: missing resolv.conf.erb?? - include issue -} - -node default { - $nova_site = "undef" - $nova_ns1 = "undef" - $nova_ns2 = "undef" -# include baseclass -} - -# novanode handles the system-level requirements for Nova/Swift nodes -class novanode { - include baseclass - include lvmconf -} diff --git a/contrib/puppet/puppet.conf b/contrib/puppet/puppet.conf deleted file mode 100644 index 92af920e3..000000000 --- a/contrib/puppet/puppet.conf +++ /dev/null @@ -1,11 +0,0 @@ -[main] -logdir=/var/log/puppet -vardir=/var/lib/puppet -ssldir=/var/lib/puppet/ssl -rundir=/var/run/puppet -factpath=$vardir/lib/facter -pluginsync=false - -[puppetmasterd] -templatedir=/var/lib/nova/contrib/puppet/templates -autosign=true diff --git a/contrib/puppet/templates/haproxy.cfg.erb b/contrib/puppet/templates/haproxy.cfg.erb deleted file mode 100644 index bd9991de7..000000000 --- a/contrib/puppet/templates/haproxy.cfg.erb +++ /dev/null @@ -1,39 +0,0 @@ -# this config needs haproxy-1.1.28 or haproxy-1.2.1 - -global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice - #log loghost local0 info - maxconn 4096 - #chroot /usr/share/haproxy - stats socket /var/run/haproxy.sock - user haproxy - group haproxy - daemon - #debug - #quiet - -defaults - log global - mode http - option httplog - option dontlognull - retries 3 - option redispatch - stats enable - stats uri /haproxy - maxconn 2000 - contimeout 5000 - clitimeout 50000 - srvtimeout 50000 - - -listen nova-api 0.0.0.0:8773 - option httpchk GET / HTTP/1.0\r\nHost:\ example.com - option forwardfor - reqidel ^X-Forwarded-For:.* - balance roundrobin -<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset -%> - server api_<%= port %> 127.0.0.1:<%= port %> maxconn 1 check -<% end -%> - option httpclose # disable keep-alive diff --git a/contrib/puppet/templates/monitrc-nova-api.erb b/contrib/puppet/templates/monitrc-nova-api.erb deleted file mode 100644 index fe2626327..000000000 --- a/contrib/puppet/templates/monitrc-nova-api.erb +++ /dev/null @@ -1,138 +0,0 @@ -############################################################################### -## Monit control file -############################################################################### -## -## Comments begin with a '#' and extend through the end of the line. Keywords -## are case insensitive. All path's MUST BE FULLY QUALIFIED, starting with '/'. -## -## Below you will find examples of some frequently used statements. For -## information about the control file, a complete list of statements and -## options please have a look in the monit manual. 
-## -## -############################################################################### -## Global section -############################################################################### -## -## Start monit in the background (run as a daemon): -# -set daemon 60 # check services at 1-minute intervals - with start delay 30 # optional: delay the first check by half a minute - # (by default check immediately after monit start) - - -## Set syslog logging with the 'daemon' facility. If the FACILITY option is -## omitted, monit will use 'user' facility by default. If you want to log to -## a stand alone log file instead, specify the path to a log file -# -set logfile syslog facility log_daemon -# -# -### Set the location of monit id file which saves the unique id specific for -### given monit. The id is generated and stored on first monit start. -### By default the file is placed in $HOME/.monit.id. -# -# set idfile /var/.monit.id -# -### Set the location of monit state file which saves the monitoring state -### on each cycle. By default the file is placed in $HOME/.monit.state. If -### state file is stored on persistent filesystem, monit will recover the -### monitoring state across reboots. If it is on temporary filesystem, the -### state will be lost on reboot. -# -# set statefile /var/.monit.state -# -## Set the list of mail servers for alert delivery. Multiple servers may be -## specified using comma separator. By default monit uses port 25 - this -## is possible to override with the PORT option. -# -# set mailserver mail.bar.baz, # primary mailserver -# backup.bar.baz port 10025, # backup mailserver on port 10025 -# localhost # fallback relay -# -# -## By default monit will drop alert events if no mail servers are available. -## If you want to keep the alerts for a later delivery retry, you can use the -## EVENTQUEUE statement. The base directory where undelivered alerts will be -## stored is specified by the BASEDIR option. You can limit the maximal queue -## size using the SLOTS option (if omitted, the queue is limited by space -## available in the back end filesystem). -# -# set eventqueue -# basedir /var/monit # set the base directory where events will be stored -# slots 100 # optionally limit the queue size -# -# -## Send status and events to M/Monit (Monit central management: for more -## information about M/Monit see http://www.tildeslash.com/mmonit). -# -# set mmonit http://monit:monit@192.168.1.10:8080/collector -# -# -## Monit by default uses the following alert mail format: -## -## --8<-- -## From: monit@$HOST # sender -## Subject: monit alert -- $EVENT $SERVICE # subject -## -## $EVENT Service $SERVICE # -## # -## Date: $DATE # -## Action: $ACTION # -## Host: $HOST # body -## Description: $DESCRIPTION # -## # -## Your faithful employee, # -## monit # -## --8<-- -## -## You can override this message format or parts of it, such as subject -## or sender using the MAIL-FORMAT statement. Macros such as $DATE, etc. -## are expanded at runtime. For example, to override the sender: -# -# set mail-format { from: monit@foo.bar } -# -# -## You can set alert recipients here who will receive alerts if/when a -## service defined in this file has errors. Alerts may be restricted on -## events by using a filter as in the second example below.
-# -# set alert sysadm@foo.bar # receive all alerts -# set alert manager@foo.bar only on { timeout } # receive just service- -# # timeout alert -# -# -## Monit has an embedded web server which can be used to view status of -## services monitored, the current configuration, actual services parameters -## and manage services from a web interface. -# - set httpd port 2812 and - use address localhost # only accept connection from localhost - allow localhost # allow localhost to connect to the server and -# allow admin:monit # require user 'admin' with password 'monit' -# allow @monit # allow users of group 'monit' to connect (rw) -# allow @users readonly # allow users of group 'users' to connect readonly -# -# -############################################################################### -## Services -############################################################################### - -<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset %> - -check process nova_api_<%= port %> with pidfile /var/run/nova/nova-api-<%= port %>.pid - group nova_api - start program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock start" - as uid nova - stop program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock stop" - as uid nova - if failed port <%= port %> protocol http - with timeout 15 seconds - for 4 cycles - then restart - if totalmem > 300 Mb then restart - if cpu is greater than 60% for 2 cycles then alert - if cpu > 80% for 3 cycles then restart - if 3 restarts within 5 cycles then timeout - -<% end %> diff --git a/contrib/puppet/templates/nova-iptables.erb b/contrib/puppet/templates/nova-iptables.erb deleted file mode 100644 index 2fc066305..000000000 --- a/contrib/puppet/templates/nova-iptables.erb +++ /dev/null @@ -1,10 +0,0 @@ -<% services.each do |service| -%> -<%= service.upcase %>=1 -<% end -%> -<% if ip && ip != "" %>IP="<%=ip%>"<% end %> -<% if private_range && private_range != "" %>PRIVATE_RANGE="<%=private_range%>"<% end %> -<% if mgmt_ip && mgmt_ip != "" %>MGMT_IP="<%=mgmt_ip%>"<% end %> -<% if dmz_ip && dmz_ip != "" %>DMZ_IP="<%=dmz_ip%>"<% end %> - -# warning: this file is auto-generated by puppet - diff --git a/contrib/puppet/templates/production/nova-common.conf.erb b/contrib/puppet/templates/production/nova-common.conf.erb deleted file mode 100644 index 23ee0c5e8..000000000 --- a/contrib/puppet/templates/production/nova-common.conf.erb +++ /dev/null @@ -1,55 +0,0 @@ -# global ---dmz_net=192.168.0.0 ---dmz_mask=255.255.0.0 ---dmz_cidr=192.168.0.0/16 ---ldap_user_dn=cn=Administrators,dc=example,dc=com ---ldap_user_unit=Users ---ldap_user_subtree=ou=Users,dc=example,dc=com ---ldap_project_subtree=ou=Groups,dc=example,dc=com ---role_project_subtree=ou=Groups,dc=example,dc=com ---ldap_cloudadmin=cn=NovaAdmins,ou=Groups,dc=example,dc=com ---ldap_itsec=cn=NovaSecurity,ou=Groups,dc=example,dc=com ---ldap_sysadmin=cn=Administrators,ou=Groups,dc=example,dc=com ---ldap_netadmin=cn=Administrators,ou=Groups,dc=example,dc=com ---ldap_developer=cn=developers,ou=Groups,dc=example,dc=com ---verbose ---daemonize ---syslog ---networks_path=/var/lib/nova/networks ---instances_path=/var/lib/nova/instances ---buckets_path=/var/lib/nova/objectstore/buckets ---images_path=/var/lib/nova/objectstore/images 
---scheduler_driver=nova.scheduler.simple.SimpleScheduler ---libvirt_xml_template=/usr/share/nova/libvirt.qemu.xml.template ---credentials_template=/usr/share/nova/novarc.template ---boot_script_template=/usr/share/nova/bootscript.template ---vpn_client_template=/usr/share/nova/client.ovpn.template ---max_cores=40 ---max_gigabytes=2000 ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---vpn_start=11000 ---volume_group=vgdata ---volume_manager=nova.volume.manager.ISCSIManager ---volume_driver=nova.volume.driver.ISCSIDriver ---default_kernel=aki-DEFAULT ---default_ramdisk=ari-DEFAULT ---dhcpbridge=/usr/bin/nova-dhcpbridge ---vpn_image_id=ami-cloudpipe ---dhcpbridge_flagfile=/etc/nova/nova.conf ---credential_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=NOVA/CN=%s-%s ---auth_driver=nova.auth.ldapdriver.LdapDriver ---quota_cores=17 ---quota_floating_ips=5 ---quota_instances=6 ---quota_volumes=10 ---quota_gigabytes=100 ---use_nova_chains=True ---input_chain=services ---use_project_ca=True ---fixed_ip_disassociate_timeout=300 ---api_max_requests=1 ---api_listen_ip=127.0.0.1 ---user_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=%s-%s-%s ---project_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-ca-%s-%s ---vpn_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-vpn-%s-%s diff --git a/contrib/puppet/templates/production/nova-nova.conf.erb b/contrib/puppet/templates/production/nova-nova.conf.erb deleted file mode 100644 index 8683fefde..000000000 --- a/contrib/puppet/templates/production/nova-nova.conf.erb +++ /dev/null @@ -1,21 +0,0 @@ ---fixed_range=192.168.0.0/16 ---iscsi_ip_prefix=192.168.4 ---floating_range=10.0.0.0/24 ---rabbit_host=192.168.0.10 ---s3_host=192.168.0.10 ---cc_host=192.168.0.10 ---cc_dmz=192.168.24.10 ---s3_dmz=192.168.24.10 ---ec2_url=http://192.168.0.1:8773/services/Cloud ---vpn_ip=192.168.0.2 ---ldap_url=ldap://192.168.0.10 ---sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova ---other_sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova ---routing_source_ip=192.168.0.2 ---bridge_dev=eth1 ---public_interface=eth0 ---vlan_start=3100 ---num_networks=700 ---rabbit_userid=TODO:RABBIT ---rabbit_password=TODO:CHANGEME ---ldap_password=TODO:CHANGEME diff --git a/doc/.autogenerated b/doc/.autogenerated new file mode 100644 index 000000000..456c8ad1e --- /dev/null +++ b/doc/.autogenerated @@ -0,0 +1,283 @@ +source/api/nova..adminclient.rst +source/api/nova..api.direct.rst +source/api/nova..api.ec2.admin.rst +source/api/nova..api.ec2.apirequest.rst +source/api/nova..api.ec2.cloud.rst +source/api/nova..api.ec2.metadatarequesthandler.rst +source/api/nova..api.openstack.auth.rst +source/api/nova..api.openstack.backup_schedules.rst +source/api/nova..api.openstack.common.rst +source/api/nova..api.openstack.consoles.rst +source/api/nova..api.openstack.faults.rst +source/api/nova..api.openstack.flavors.rst +source/api/nova..api.openstack.images.rst +source/api/nova..api.openstack.servers.rst +source/api/nova..api.openstack.shared_ip_groups.rst +source/api/nova..api.openstack.zones.rst +source/api/nova..auth.dbdriver.rst +source/api/nova..auth.fakeldap.rst +source/api/nova..auth.ldapdriver.rst +source/api/nova..auth.manager.rst +source/api/nova..auth.signer.rst +source/api/nova..cloudpipe.pipelib.rst +source/api/nova..compute.api.rst +source/api/nova..compute.instance_types.rst +source/api/nova..compute.manager.rst +source/api/nova..compute.monitor.rst +source/api/nova..compute.power_state.rst +source/api/nova..console.api.rst 
+source/api/nova..console.fake.rst +source/api/nova..console.manager.rst +source/api/nova..console.xvp.rst +source/api/nova..context.rst +source/api/nova..crypto.rst +source/api/nova..db.api.rst +source/api/nova..db.base.rst +source/api/nova..db.migration.rst +source/api/nova..db.sqlalchemy.api.rst +source/api/nova..db.sqlalchemy.migrate_repo.manage.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst +source/api/nova..db.sqlalchemy.migration.rst +source/api/nova..db.sqlalchemy.models.rst +source/api/nova..db.sqlalchemy.session.rst +source/api/nova..exception.rst +source/api/nova..fakememcache.rst +source/api/nova..fakerabbit.rst +source/api/nova..flags.rst +source/api/nova..image.glance.rst +source/api/nova..image.local.rst +source/api/nova..image.s3.rst +source/api/nova..image.service.rst +source/api/nova..log.rst +source/api/nova..manager.rst +source/api/nova..network.api.rst +source/api/nova..network.linux_net.rst +source/api/nova..network.manager.rst +source/api/nova..objectstore.bucket.rst +source/api/nova..objectstore.handler.rst +source/api/nova..objectstore.image.rst +source/api/nova..objectstore.stored.rst +source/api/nova..quota.rst +source/api/nova..rpc.rst +source/api/nova..scheduler.chance.rst +source/api/nova..scheduler.driver.rst +source/api/nova..scheduler.manager.rst +source/api/nova..scheduler.simple.rst +source/api/nova..scheduler.zone.rst +source/api/nova..service.rst +source/api/nova..test.rst +source/api/nova..tests.api.openstack.fakes.rst +source/api/nova..tests.api.openstack.test_adminapi.rst +source/api/nova..tests.api.openstack.test_api.rst +source/api/nova..tests.api.openstack.test_auth.rst +source/api/nova..tests.api.openstack.test_common.rst +source/api/nova..tests.api.openstack.test_faults.rst +source/api/nova..tests.api.openstack.test_flavors.rst +source/api/nova..tests.api.openstack.test_images.rst +source/api/nova..tests.api.openstack.test_ratelimiting.rst +source/api/nova..tests.api.openstack.test_servers.rst +source/api/nova..tests.api.openstack.test_shared_ip_groups.rst +source/api/nova..tests.api.openstack.test_zones.rst +source/api/nova..tests.api.test_wsgi.rst +source/api/nova..tests.db.fakes.rst +source/api/nova..tests.declare_flags.rst +source/api/nova..tests.fake_flags.rst +source/api/nova..tests.glance.stubs.rst +source/api/nova..tests.hyperv_unittest.rst +source/api/nova..tests.objectstore_unittest.rst +source/api/nova..tests.real_flags.rst +source/api/nova..tests.runtime_flags.rst +source/api/nova..tests.test_access.rst +source/api/nova..tests.test_api.rst +source/api/nova..tests.test_auth.rst +source/api/nova..tests.test_cloud.rst +source/api/nova..tests.test_compute.rst +source/api/nova..tests.test_console.rst +source/api/nova..tests.test_direct.rst +source/api/nova..tests.test_flags.rst +source/api/nova..tests.test_instance_types.rst +source/api/nova..tests.test_localization.rst +source/api/nova..tests.test_log.rst +source/api/nova..tests.test_middleware.rst +source/api/nova..tests.test_misc.rst +source/api/nova..tests.test_network.rst 
+source/api/nova..tests.test_quota.rst +source/api/nova..tests.test_rpc.rst +source/api/nova..tests.test_scheduler.rst +source/api/nova..tests.test_service.rst +source/api/nova..tests.test_test.rst +source/api/nova..tests.test_twistd.rst +source/api/nova..tests.test_utils.rst +source/api/nova..tests.test_virt.rst +source/api/nova..tests.test_volume.rst +source/api/nova..tests.test_xenapi.rst +source/api/nova..tests.xenapi.stubs.rst +source/api/nova..twistd.rst +source/api/nova..utils.rst +source/api/nova..version.rst +source/api/nova..virt.connection.rst +source/api/nova..virt.disk.rst +source/api/nova..virt.fake.rst +source/api/nova..virt.hyperv.rst +source/api/nova..virt.images.rst +source/api/nova..virt.libvirt_conn.rst +source/api/nova..virt.xenapi.fake.rst +source/api/nova..virt.xenapi.network_utils.rst +source/api/nova..virt.xenapi.vm_utils.rst +source/api/nova..virt.xenapi.vmops.rst +source/api/nova..virt.xenapi.volume_utils.rst +source/api/nova..virt.xenapi.volumeops.rst +source/api/nova..virt.xenapi_conn.rst +source/api/nova..volume.api.rst +source/api/nova..volume.driver.rst +source/api/nova..volume.manager.rst +source/api/nova..volume.san.rst +source/api/nova..wsgi.rst +source/api/autoindex.rst +source/api/nova..adminclient.rst +source/api/nova..api.direct.rst +source/api/nova..api.ec2.admin.rst +source/api/nova..api.ec2.apirequest.rst +source/api/nova..api.ec2.cloud.rst +source/api/nova..api.ec2.metadatarequesthandler.rst +source/api/nova..api.openstack.auth.rst +source/api/nova..api.openstack.backup_schedules.rst +source/api/nova..api.openstack.common.rst +source/api/nova..api.openstack.consoles.rst +source/api/nova..api.openstack.faults.rst +source/api/nova..api.openstack.flavors.rst +source/api/nova..api.openstack.images.rst +source/api/nova..api.openstack.servers.rst +source/api/nova..api.openstack.shared_ip_groups.rst +source/api/nova..api.openstack.zones.rst +source/api/nova..auth.dbdriver.rst +source/api/nova..auth.fakeldap.rst +source/api/nova..auth.ldapdriver.rst +source/api/nova..auth.manager.rst +source/api/nova..auth.signer.rst +source/api/nova..cloudpipe.pipelib.rst +source/api/nova..compute.api.rst +source/api/nova..compute.instance_types.rst +source/api/nova..compute.manager.rst +source/api/nova..compute.monitor.rst +source/api/nova..compute.power_state.rst +source/api/nova..console.api.rst +source/api/nova..console.fake.rst +source/api/nova..console.manager.rst +source/api/nova..console.xvp.rst +source/api/nova..context.rst +source/api/nova..crypto.rst +source/api/nova..db.api.rst +source/api/nova..db.base.rst +source/api/nova..db.migration.rst +source/api/nova..db.sqlalchemy.api.rst +source/api/nova..db.sqlalchemy.migrate_repo.manage.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst +source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst +source/api/nova..db.sqlalchemy.migration.rst +source/api/nova..db.sqlalchemy.models.rst +source/api/nova..db.sqlalchemy.session.rst +source/api/nova..exception.rst +source/api/nova..fakememcache.rst +source/api/nova..fakerabbit.rst +source/api/nova..flags.rst 
+source/api/nova..image.glance.rst +source/api/nova..image.local.rst +source/api/nova..image.s3.rst +source/api/nova..image.service.rst +source/api/nova..log.rst +source/api/nova..manager.rst +source/api/nova..network.api.rst +source/api/nova..network.linux_net.rst +source/api/nova..network.manager.rst +source/api/nova..objectstore.bucket.rst +source/api/nova..objectstore.handler.rst +source/api/nova..objectstore.image.rst +source/api/nova..objectstore.stored.rst +source/api/nova..quota.rst +source/api/nova..rpc.rst +source/api/nova..scheduler.chance.rst +source/api/nova..scheduler.driver.rst +source/api/nova..scheduler.manager.rst +source/api/nova..scheduler.simple.rst +source/api/nova..scheduler.zone.rst +source/api/nova..service.rst +source/api/nova..test.rst +source/api/nova..tests.api.openstack.fakes.rst +source/api/nova..tests.api.openstack.test_adminapi.rst +source/api/nova..tests.api.openstack.test_api.rst +source/api/nova..tests.api.openstack.test_auth.rst +source/api/nova..tests.api.openstack.test_common.rst +source/api/nova..tests.api.openstack.test_faults.rst +source/api/nova..tests.api.openstack.test_flavors.rst +source/api/nova..tests.api.openstack.test_images.rst +source/api/nova..tests.api.openstack.test_ratelimiting.rst +source/api/nova..tests.api.openstack.test_servers.rst +source/api/nova..tests.api.openstack.test_shared_ip_groups.rst +source/api/nova..tests.api.openstack.test_zones.rst +source/api/nova..tests.api.test_wsgi.rst +source/api/nova..tests.db.fakes.rst +source/api/nova..tests.declare_flags.rst +source/api/nova..tests.fake_flags.rst +source/api/nova..tests.glance.stubs.rst +source/api/nova..tests.hyperv_unittest.rst +source/api/nova..tests.objectstore_unittest.rst +source/api/nova..tests.real_flags.rst +source/api/nova..tests.runtime_flags.rst +source/api/nova..tests.test_access.rst +source/api/nova..tests.test_api.rst +source/api/nova..tests.test_auth.rst +source/api/nova..tests.test_cloud.rst +source/api/nova..tests.test_compute.rst +source/api/nova..tests.test_console.rst +source/api/nova..tests.test_direct.rst +source/api/nova..tests.test_flags.rst +source/api/nova..tests.test_instance_types.rst +source/api/nova..tests.test_localization.rst +source/api/nova..tests.test_log.rst +source/api/nova..tests.test_middleware.rst +source/api/nova..tests.test_misc.rst +source/api/nova..tests.test_network.rst +source/api/nova..tests.test_quota.rst +source/api/nova..tests.test_rpc.rst +source/api/nova..tests.test_scheduler.rst +source/api/nova..tests.test_service.rst +source/api/nova..tests.test_test.rst +source/api/nova..tests.test_twistd.rst +source/api/nova..tests.test_utils.rst +source/api/nova..tests.test_virt.rst +source/api/nova..tests.test_volume.rst +source/api/nova..tests.test_xenapi.rst +source/api/nova..tests.xenapi.stubs.rst +source/api/nova..twistd.rst +source/api/nova..utils.rst +source/api/nova..version.rst +source/api/nova..virt.connection.rst +source/api/nova..virt.disk.rst +source/api/nova..virt.fake.rst +source/api/nova..virt.hyperv.rst +source/api/nova..virt.images.rst +source/api/nova..virt.libvirt_conn.rst +source/api/nova..virt.xenapi.fake.rst +source/api/nova..virt.xenapi.network_utils.rst +source/api/nova..virt.xenapi.vm_utils.rst +source/api/nova..virt.xenapi.vmops.rst +source/api/nova..virt.xenapi.volume_utils.rst +source/api/nova..virt.xenapi.volumeops.rst +source/api/nova..virt.xenapi_conn.rst +source/api/nova..volume.api.rst +source/api/nova..volume.driver.rst +source/api/nova..volume.manager.rst +source/api/nova..volume.san.rst 
+source/api/nova..wsgi.rst diff --git a/doc/build/html/.buildinfo b/doc/build/html/.buildinfo new file mode 100644 index 000000000..091736d4f --- /dev/null +++ b/doc/build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 2a2fe6198f4be4a4d6f289b09d16d74a +tags: fbb0d17656682115ca4d033fb2f83ba1 diff --git a/doc/ext/nova_autodoc.py b/doc/ext/nova_autodoc.py index 5429bb656..3dd992d84 100644 --- a/doc/ext/nova_autodoc.py +++ b/doc/ext/nova_autodoc.py @@ -8,5 +8,6 @@ from nova import utils def setup(app): rootdir = os.path.abspath(app.srcdir + '/..') print "**Autodocumenting from %s" % rootdir - rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir) + os.chdir(rootdir) + rv = utils.execute('./generate_autodoc_index.sh') print rv[0] diff --git a/doc/source/adminguide/distros/others.rst b/doc/source/adminguide/distros/others.rst deleted file mode 100644 index ec14a9abb..000000000 --- a/doc/source/adminguide/distros/others.rst +++ /dev/null @@ -1,88 +0,0 @@ -Installation on other distros (like Debian, Fedora or CentOS) -============================================================== - -Feel free to add additional notes for additional distributions. - -Nova installation on CentOS 5.5 ------------------------------- - -These are notes for installing OpenStack Compute on CentOS 5.5 and will be updated but are NOT final. Please test for accuracy and edit as you see fit. - -The principal bottleneck for running Nova on CentOS is Python 2.6: Nova is written in Python 2.6, while CentOS 5.5 ships with Python 2.4. We cannot update Python system-wide because some core utilities (like yum) depend on Python 2.4. Also, very few Python 2.6 modules are available in the CentOS/EPEL repos.
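-Because the two interpreters must coexist, every Nova-related command has to be invoked through the python2.6 binary explicitly. As a quick sanity check (a sketch, not part of the original notes; it assumes the python26 package from EPEL installed in the next step), confirm that both interpreters resolve as expected:: - - python -V # the system interpreter that yum depends on; expect Python 2.4.x - python2.6 -V # the parallel interpreter from EPEL; expect Python 2.6.x - -If python2.6 is missing after the yum step below, the EPEL repo was probably not added correctly.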
- -Pre-reqs -------- - -Add the euca2ools and EPEL repos first:: - - cat >/etc/yum.repos.d/euca2ools.repo << EUCA_REPO_CONF_EOF - [eucalyptus] - name=euca2ools - baseurl=http://www.eucalyptussoftware.com/downloads/repo/euca2ools/1.3.1/yum/centos/ - enabled=1 - gpgcheck=0 - - EUCA_REPO_CONF_EOF - -:: - - rpm -Uvh 'http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm' - -Now install python2.6, kvm and a few other libraries through yum:: - - yum -y install dnsmasq vblade kpartx kvm gawk iptables ebtables bzr screen euca2ools curl rabbitmq-server gcc gcc-c++ autoconf automake swig openldap openldap-servers nginx python26 python26-devel python26-distribute git openssl-devel python26-tools mysql-server qemu kmod-kvm libxml2 libxslt libxslt-devel mysql-devel - -Then download the latest aoetools and build (and install) it. Check for the latest version on SourceForge; the exact URL will change if there's a new release:: - - wget -c http://sourceforge.net/projects/aoetools/files/aoetools/32/aoetools-32.tar.gz/download - tar -zxvf aoetools-32.tar.gz - cd aoetools-32 - make - make install - -Add the udev rules for aoetools:: - - cat > /etc/udev/rules.d/60-aoe.rules << AOE_RULES_EOF - SUBSYSTEM=="aoe", KERNEL=="discover", NAME="etherd/%k", GROUP="disk", MODE="0220" - SUBSYSTEM=="aoe", KERNEL=="err", NAME="etherd/%k", GROUP="disk", MODE="0440" - SUBSYSTEM=="aoe", KERNEL=="interfaces", NAME="etherd/%k", GROUP="disk", MODE="0220" - SUBSYSTEM=="aoe", KERNEL=="revalidate", NAME="etherd/%k", GROUP="disk", MODE="0220" - # aoe block devices - KERNEL=="etherd*", NAME="%k", GROUP="disk" - AOE_RULES_EOF - -Load the kernel modules:: - - modprobe aoe - -:: - - modprobe kvm - -Now, install the Python modules using easy_install-2.6; this ensures the installations are done against Python 2.6:: - - easy_install-2.6 twisted sqlalchemy mox greenlet carrot daemon eventlet tornado IPy routes lxml MySQL-python - -python-gflags needs to be downloaded and installed manually; use these commands (check the exact URL for newer releases): - -:: - - wget -c "http://python-gflags.googlecode.com/files/python-gflags-1.4.tar.gz" - tar -zxvf python-gflags-1.4.tar.gz - cd python-gflags-1.4 - python2.6 setup.py install - cd .. - -Do the same for the libxml2 Python module; notice the --with-python and --prefix flags. --with-python ensures we are building against python2.6 (otherwise it will build against python2.4, which is the default):: - - wget -c "ftp://xmlsoft.org/libxml2/libxml2-2.7.3.tar.gz" - tar -zxvf libxml2-2.7.3.tar.gz - cd libxml2-2.7.3 - ./configure --with-python=/usr/bin/python26 --prefix=/usr - make all - make install - cd python - python2.6 setup.py install - cd .. - -Once you've done this, continue at Step 3 here: :doc:`../single.node.install` diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst deleted file mode 100644 index bd0693c46..000000000 --- a/doc/source/adminguide/distros/ubuntu.10.04.rst +++ /dev/null @@ -1,40 +0,0 @@ -Installing on Ubuntu 10.04 (Lucid) -================================== - -Step 1: Get the latest Nova code -------------------------------- -Grab the latest code from launchpad: - -:: - - bzr clone lp:nova - -Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (when using Debian, edit nova.sh to have USE_PPA=0): - -.. todo:: give a link to a stable releases page - -Step 2: Install dependencies ---------------------------- - -Nova requires rabbitmq for messaging, so install that first.
- -*Note:* You must have sudo installed to run these commands as shown here. - -:: - - sudo apt-get install rabbitmq-server - - -You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. - -If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gflags, which are included in the OpenStack PPA. - -:: - - sudo apt-get install python-software-properties - sudo add-apt-repository ppa:nova-core/trunk - sudo apt-get update - sudo apt-get install python-twisted python-gflags - - -Once you've done this, continue at Step 3 here: :doc:`../single.node.install` diff --git a/doc/source/adminguide/distros/ubuntu.10.10.rst b/doc/source/adminguide/distros/ubuntu.10.10.rst deleted file mode 100644 index a3fa2def1..000000000 --- a/doc/source/adminguide/distros/ubuntu.10.10.rst +++ /dev/null @@ -1,41 +0,0 @@ -Installing on Ubuntu 10.10 (Maverick) -===================================== -Single Machine Installation (Ubuntu 10.10) - -While we wouldn't expect you to put OpenStack Compute into production on a non-LTS version of Ubuntu, these instructions are up-to-date with the latest version of Ubuntu. - -Make sure you are running Ubuntu 10.10 so that the packages will be available. This install requires more than 70 MB of free disk space. - -These instructions are based on Soren Hansen's blog entry, Openstack on Maverick. A script is in progress as well. - -Step 1: Install required prerequisites -------------------------------------- -Nova requires rabbitmq for messaging and redis for storing state (for now), so we'll install these first:: - - sudo apt-get install rabbitmq-server redis-server - -You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. - -Step 2: Install Nova packages available in Maverick Meerkat ----------------------------------------------------------- -Type or copy/paste in the following lines to get the packages that you use to run OpenStack Compute:: - - sudo apt-get install python-nova - sudo apt-get install nova-api nova-objectstore nova-compute nova-scheduler nova-network euca2ools unzip - -You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. This operation may take a while as many dependent packages will be installed. Note: there is a dependency problem with python-nova, which can be worked around by installing python-nova first, as shown above. - -When the installation is complete, you'll see the following lines confirming:: - - Adding system user `nova' (UID 106) ... - Adding new user `nova' (UID 106) with group `nogroup' ... - Not creating home directory `/var/lib/nova'. - Setting up nova-scheduler (0.9.1~bzr331-0ubuntu2) ... - * Starting nova scheduler nova-scheduler - WARNING:root:Starting scheduler node - ...done. - Processing triggers for libc-bin ... - ldconfig deferred processing now taking place - Processing triggers for python-support ...
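-Before moving on, it can be worth confirming that the packages actually landed (a quick check, not part of the original walkthrough):: - - dpkg -l 'nova-*' python-nova # every listed package should show status 'ii' (installed)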
- -Once you've done this, continue at Step 3 here: :doc:`../single.node.install` diff --git a/doc/source/adminguide/flags.rst b/doc/source/adminguide/flags.rst deleted file mode 100644 index 072f0a1a5..000000000 --- a/doc/source/adminguide/flags.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Flags and Flagfiles =================== - -* python-gflags -* flagfiles -* list of flags by component (see concepts list) diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst deleted file mode 100644 index f2f25b060..000000000 --- a/doc/source/adminguide/multi.node.install.rst +++ /dev/null @@ -1,364 +0,0 @@ - -Installing Nova on Multiple Servers =================================== - -When you move beyond evaluating the technology and into building an actual -production environment, you will need to know how to configure your datacenter -and how to deploy components across your clusters. This guide should help you -through that process. - -You can install multiple nodes to increase performance and availability of the OpenStack Compute installation. - -This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Most of this works around issues that need to be resolved either in packaging or bug-fixing. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward. - -For a starting architecture, these instructions describe installing a cloud controller node and a compute node. The cloud controller node contains the nova services plus the database. The compute node installs all the nova services but then refers to the database installation, which is hosted by the cloud controller node. - -Requirements for a multi-node installation ------------------------------------------ - -* You need a real database, compatible with SQLAlchemy (MySQL, PostgreSQL). There's not a specific reason to choose one over the other; it basically depends on what you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though. -* For a recommended HA setup, consider MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies. -* For performance optimization, split reads and writes to the database. MySQL Proxy is the easiest way to make this work if running MySQL. - -Assumptions ----------- - -* Networking is configured between/through the physical machines on a single subnet. -* Installation and execution are both performed by the ROOT user. - - -Scripted Installation --------------------- -A script is available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node. - -You must run these scripts with root permissions. - -From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up, but it is offered "as-is."
Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/. - -:: - - wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/Nova_CC_Installer_v0.1 - -Ensure you can execute the script by modifying the permissions on the script file. - -:: - - sudo chmod 755 Nova_CC_Installer_v0.1 - - -:: - - sudo ./Nova_CC_Installer_v0.1 - -Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node. - -Restart related services:: - - libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart - -You can go to the `Configuration section`_ for next steps. - -Manual Installation - Step-by-Step ---------------------------------- -The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova services, and the compute node runs the nova services only. - -Cloud Controller Installation ````````````````````````````` -On the cloud controller node, you install the nova services and the related helper applications, and then configure them with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_. - -Step 1 - Use apt-get to get the latest code ------------------------------------------- - -1. Set up the Nova PPA from https://launchpad.net/~nova-core/+archive/trunk. The ‘python-software-properties’ package is a prerequisite for setting up the nova package repo: - -:: - - sudo apt-get install python-software-properties - sudo add-apt-repository ppa:nova-core/trunk - -2. Run update. - -:: - - sudo apt-get update - -3. Install the required Python packages, nova packages, and helper apps. - -:: - - sudo apt-get install python-greenlet python-mysqldb python-nova nova-common nova-doc nova-api nova-network nova-objectstore nova-scheduler nova-compute euca2ools unzip - -It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
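-Since the services are expected to complain until nova.conf is filled in during the next step, a quick way to see what they are unhappy about (a sketch, assuming the default --logdir shown below) is to tail the service logs:: - - tail -n 20 /var/log/nova/nova-api.log # repeat for nova-network, nova-compute, and so on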
-Step 2 - Set up the configuration file (installed in /etc/nova) ---------------------------------------------------------------- - -1. Nova development has consolidated all config files into nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf: - -:: - ---daemonize=1 ---dhcpbridge_flagfile=/etc/nova/nova.conf ---dhcpbridge=/usr/bin/nova-dhcpbridge ---logdir=/var/log/nova ---state_path=/var/lib/nova - -The following items ALSO need to be defined in /etc/nova/nova.conf. I’ve added some explanation of the variables, as comments CANNOT be in nova.conf. There seems to be an issue with nova-manage not processing the comments/whitespace correctly: - ---sql_connection ### Location of the Nova SQL DB - ---s3_host ### This is where Nova is hosting the objectstore service, which will contain the VM images and buckets - ---rabbit_host ### This is where the rabbit AMQP messaging service is hosted - ---cc_host ### This is where the nova-api service lives - ---verbose ### Optional but very helpful during initial setup - ---ec2_url ### The URL used to reach nova-api - ---network_manager ### Many options here, discussed below. -This is how your controller will communicate with additional Nova nodes and VMs: - -nova.network.manager.FlatManager # Simple, no-VLAN networking type -nova.network.manager.FlatDHCPManager # Flat networking with DHCP -nova.network.manager.VlanManager # VLAN networking with DHCP; the DEFAULT if no network manager is defined in nova.conf - ---fixed_range=<network/prefix> ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12 - ---network_size=<# of addrs> ### This is the total number of IP addresses to use for VM guests, across all projects. E.g. 5000 - -The following code can be cut and pasted, then edited to match your setup: - -Note: CC_ADDR=<the external IP address of your cloud controller> - -A detailed explanation of the following example is available above. - -:: - ---sql_connection=mysql://root:nova@<CC_ADDR>/nova ---s3_host=<CC_ADDR> ---rabbit_host=<CC_ADDR> ---cc_host=<CC_ADDR> ---verbose ---ec2_url=http://<CC_ADDR>:8773/services/Cloud ---network_manager=nova.network.manager.VlanManager ---fixed_range=<network/prefix> ---network_size=<# of addrs> - -2. Create a “nova” group, and set permissions:: - - addgroup nova - -The Nova config file should have its owner set to root:nova, and mode set to 0644, since it contains your MySQL server's root password:: - - chown -R root:nova /etc/nova - chmod 644 /etc/nova/nova.conf - -Step 3 - Set up the SQL DB (MySQL for this setup) ------------------------------------------------ - -1. First you 'preseed' to bypass all the installation prompts:: - - bash - MYSQL_PASS=nova - cat <<MYSQL_PRESEED | debconf-set-selections - mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS - mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS - mysql-server-5.1 mysql-server/start_on_boot boolean true - MYSQL_PRESEED - -2. Install MySQL:: - - apt-get install -y mysql-server - -3. Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost to any:: - - sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf - service mysql restart - -4. MySQL DB configuration: - -Create the NOVA database:: - - mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;' - -Update the DB to include the user 'root'@'%' with superuser privileges:: - - mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;" - -Set the MySQL root password:: - - mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
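-Because the compute node will connect to this database over the network, it is worth verifying remote access now (a sketch, not part of the original guide; it reuses the $MYSQL_PASS value and the <CC_ADDR> placeholder from above):: - - mysql -uroot -p$MYSQL_PASS -h <CC_ADDR> -e 'SHOW DATABASES;' # the output should include the nova database - -If this fails, re-check the bind-address edit and the GRANT statement above.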
-Compute Node Installation ````````````````````````` - -Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node. - -Network Configuration --------------------- - -If you use FlatManager as your network manager (as opposed to the VlanManager shown in the nova.conf example above), there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically. - -Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following:: - - < begin /etc/network/interfaces > - # The loopback network interface - auto lo - iface lo inet loopback - - # Networking for NOVA - auto br100 - - iface br100 inet dhcp - bridge_ports eth0 - bridge_stp off - bridge_maxwait 0 - bridge_fd 0 - < end /etc/network/interfaces > - -Next, restart networking to apply the changes:: - - sudo /etc/init.d/networking restart - -Configuration ````````````` - -On the Compute node, you should continue with these configuration steps. - -Step 1 - Set up the Nova environment ------------------------------------ - -These are the commands you run to update the database if needed, and then set up a user and project:: - - /usr/bin/python /usr/bin/nova-manage db sync - /usr/bin/python /usr/bin/nova-manage user admin <user_name> - /usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name> - /usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project> - -Here is an example of what this looks like with real data:: - - /usr/bin/python /usr/bin/nova-manage db sync - /usr/bin/python /usr/bin/nova-manage user admin dub - /usr/bin/python /usr/bin/nova-manage project create dubproject dub - /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255 - -(I chose a /24 since that falls inside the /12 range I set in ‘fixed_range’ in nova.conf. Currently, there can only be one network, and I am using the max IPs available in a /24. You can choose to use any valid amount that you would like.) - -Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table. - -On running this command, entries are made in the 'networks' and 'fixed_ips' tables. However, one of the networks listed in the 'networks' table needs to be marked as a bridge in order for the code to know that a bridge exists. The network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device. - - -Step 2 - Create Nova certificates ----------------------------------- - -1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and perform all the other assorted API functions. - -:: - - mkdir -p /root/creds - /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip - -2. Unzip them in your home directory, and add them to your environment. - -:: - - unzip /root/creds/novacreds.zip -d /root/creds/ - cat /root/creds/novarc >> ~/.bashrc - source ~/.bashrc - -Step 3 - Restart all relevant services -------------------------------------- - -Restart all six services in total, just to cover the entire spectrum:: - - libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
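-To confirm the daemons actually stayed up after the restart (a quick check, not part of the original guide), inspect the process list:: - - ps aux | grep -E 'nova-(api|network|compute|objectstore|scheduler)' | grep -v grep - -Each of the five nova services should appear; anything missing usually points back to a nova.conf problem.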
-Step 4 - Closing steps, and cleaning up --------------------------------------- - -One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs:: - - euca-authorize -P icmp -t -1:-1 default - euca-authorize -P tcp -p 22 default - -Another common issue is that you cannot ping or SSH to your instances after issuing the 'euca-authorize' commands. Something to look at is the number of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following:: - - killall dnsmasq - service nova-network restart - -Testing the Installation ```````````````````````` - -You can then use `euca2ools` to test some items:: - - euca-describe-images - euca-describe-instances - -If you have issues with the API key, you may need to re-source your creds file:: - - . /root/creds/novarc - -If you don’t get any immediate errors, you’re successfully making calls to your cloud! - -Spinning up a VM for Testing ```````````````````````````` - -(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.) - -The image that you will use here will be a ttylinux image, so this is a limited-function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM. - -UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can’t use images without ramdisks yet, so we can’t use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we’ll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_. - -Download the image, and publish it to your bucket: - -:: - - image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz" - wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image - uec-publish-tarball $image mybucket - -This will output three references: an "emi", an "eri", and an "eki" (image, ramdisk, and kernel). The emi is the one we use to launch instances, so take note of this. - -Create a keypair to SSH to the server: - -:: - - euca-add-keypair mykey > mykey.priv - - chmod 0600 mykey.priv - -Boot your instance: - -:: - - euca-run-instances $emi -k mykey -t m1.tiny - -($emi is replaced with the output from the previous command) - -Checking status, and confirming communication: - -Once you have booted the instance, you can check the status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM. - -:: - - euca-describe-instances - -Once it is in a "running" state, you can use your SSH key to connect: - -:: - - ssh -i mykey.priv root@$ipaddress - -When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command: - -:: - - euca-terminate-instances $instance-id - -You can determine the instance-id with `euca-describe-instances`; the format is "i-" with a series of letters and numbers following, e.g. i-a4g9d. - -For more information on creating your own custom (production-ready) instance images, please visit http://wiki.openstack.org/GettingImages. - -Enjoy your new private cloud, and play responsibly!
diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst deleted file mode 100644 index ff43aa90b..000000000 --- a/doc/source/adminguide/single.node.install.rst +++ /dev/null @@ -1,362 +0,0 @@ -Installing Nova on a Single Host ================================ - -Nova can be run on a single machine, and it is recommended that new users practice managing this type of installation before graduating to multi-node systems. - -The fastest way to get a test cloud running is through our :doc:`../quickstart`. But for more detail on installing the system, read this doc. - - -Steps 1 and 2: Get the latest Nova code and system software ------------------------------------------------------------ - -Depending on your system, the method for accomplishing this varies. - -.. toctree:: - :maxdepth: 1 - - distros/ubuntu.10.04 - distros/ubuntu.10.10 - distros/others - - -Step 3: Build and install Nova services --------------------------------------- - -Switch to the base nova source directory. - -Then type or copy/paste in the following lines to compile the Python code for OpenStack Compute. - -:: - - sudo python setup.py build - sudo python setup.py install - - -When the installation is complete, you'll see the following lines: - -:: - - Installing nova-network script to /usr/local/bin - Installing nova-volume script to /usr/local/bin - Installing nova-objectstore script to /usr/local/bin - Installing nova-manage script to /usr/local/bin - Installing nova-scheduler script to /usr/local/bin - Installing nova-dhcpbridge script to /usr/local/bin - Installing nova-compute script to /usr/local/bin - Installing nova-instancemonitor script to /usr/local/bin - Installing nova-api script to /usr/local/bin - Installing nova-import-canonical-imagestore script to /usr/local/bin - - Installed /usr/local/lib/python2.6/dist-packages/nova-2010.1-py2.6.egg - Processing dependencies for nova==2010.1 - Finished processing dependencies for nova==2010.1 - - -Step 4: Create the Nova Database -------------------------------- -Type or copy/paste in the following line to create your nova db:: - - sudo nova-manage db sync - -Step 5: Create a Nova administrator ----------------------------------- -Type or copy/paste in the following line to create a user named "anne":: - - sudo nova-manage user admin anne - -You will see an access key and a secret key export, such as these made-up ones:: - - export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd - export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7 - -Step 6: Create the network -------------------------- - -Type or copy/paste in the following line to create a network prior to creating a project. - -:: - - sudo nova-manage network create 10.0.0.0/8 1 64 - -For this command, the IP address is the CIDR notation for your network, such as 192.168.1.0/24. The value 1 is the total number of networks you want made, and the 64 value is the total number of IPs in all networks. - -After running this command, entries are made in the 'networks' and 'fixed_ips' tables in the database.
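-If you are curious exactly what was written (a sketch, not part of the original guide; it assumes the default sqlite backend, whose nova.sqlite file lives under the configured state path - for a source install, the nova source directory):: - - sqlite3 nova.sqlite 'SELECT id, cidr FROM networks;' # run from the directory holding nova.sqlite; lists the networks carved from the range above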
-Step 7: Create a project with the user you created -------------------------------------------------- -Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne. - -:: - - sudo nova-manage project create IRT anne - -:: - - Generating RSA private key, 1024 bit long modulus - .....++++++ - ..++++++ - e is 65537 (0x10001) - Using configuration from ./openssl.cnf - Check that the request matches the signature - Signature ok - The Subject's Distinguished Name is as follows - countryName :PRINTABLE:'US' - stateOrProvinceName :PRINTABLE:'California' - localityName :PRINTABLE:'MountainView' - organizationName :PRINTABLE:'AnsoLabs' - organizationalUnitName:PRINTABLE:'NovaDev' - commonName :PRINTABLE:'anne-2010-10-12T21:12:35Z' - Certificate is to be certified until Oct 12 21:12:35 2011 GMT (365 days) - - Write out database with 1 new entries - Data Base Updated - - -Step 8: Unzip the nova.zip -------------------------- - -You should have a nova.zip file in your current working directory. Unzip it with this command: - -:: - - unzip nova.zip - - -You'll see these files extract. - -:: - - Archive: nova.zip - extracting: novarc - extracting: pk.pem - extracting: cert.pem - extracting: nova-vpn.conf - extracting: cacert.pem - - -Step 9: Source the rc file -------------------------- -Type or copy/paste the following to source the novarc file in your current working directory. - -:: - - . novarc - - -Step 10: Pat yourself on the back :) ------------------------------------ -Congratulations! Your cloud is up and running: you’ve created an admin user, created a network, retrieved the user's credentials, and put them in your environment. - -Now you need an image. - - -Step 11: Get an image --------------------- -To make things easier, we've provided a small image on the Rackspace CDN. Use this command to get it onto your server. - -:: - - wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz - -:: - - --2010-10-12 21:40:55-- http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz - Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7 - Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected. - HTTP request sent, awaiting response... 200 OK - Length: 58520278 (56M) [application/x-gzip] - Saving to: `images.tgz' - - 100%[======================================>] 58,520,278 14.1M/s in 3.9s - - 2010-10-12 21:40:59 (14.1 MB/s) - `images.tgz' saved [58520278/58520278] - - - -Step 12: Decompress the image file ---------------------------------- -Use this command to extract the image files:: - - tar xvzf images.tgz - -You get a directory listing like so:: - - images - |-- aki-lucid - | |-- image - | `-- info.json - |-- ami-tiny - | |-- image - | `-- info.json - `-- ari-lucid - |-- image - `-- info.json - -Step 13: Send commands to upload sample image to the cloud ---------------------------------------------------------- - -Type or copy/paste the following commands to create a manifest for the kernel:: - - euca-bundle-image -i images/aki-lucid/image -p kernel --kernel true - -You should see this in response:: - - Checking image - Tarring image - Encrypting image - Splitting image... - Part: kernel.part.0 - Generating manifest /tmp/kernel.manifest.xml - -Type or copy/paste the following commands to create a manifest for the ramdisk:: - - euca-bundle-image -i images/ari-lucid/image -p ramdisk --ramdisk true - -You should see this in response:: - - Checking image - Tarring image - Encrypting image - Splitting image...
- Part: ramdisk.part.0 - Generating manifest /tmp/ramdisk.manifest.xml - -Type or copy/paste the following commands to upload the kernel bundle:: - - euca-upload-bundle -m /tmp/kernel.manifest.xml -b mybucket - -You should see this in response:: - - Checking bucket: mybucket - Creating bucket: mybucket - Uploading manifest file - Uploading part: kernel.part.0 - Uploaded image as mybucket/kernel.manifest.xml - -Type or copy/paste the following commands to upload the ramdisk bundle:: - - euca-upload-bundle -m /tmp/ramdisk.manifest.xml -b mybucket - -You should see this in response:: - - Checking bucket: mybucket - Uploading manifest file - Uploading part: ramdisk.part.0 - Uploaded image as mybucket/ramdisk.manifest.xml - -Type or copy/paste the following commands to register the kernel and get its ID:: - - euca-register mybucket/kernel.manifest.xml - -You should see this in response:: - - IMAGE ami-fcbj2non - -Type or copy/paste the following commands to register the ramdisk and get its ID:: - - euca-register mybucket/ramdisk.manifest.xml - -You should see this in response:: - - IMAGE ami-orukptrc - -Type or copy/paste the following commands to create a manifest for the machine image associated with the ramdisk and kernel IDs that you got from the previous commands:: - - euca-bundle-image -i images/ami-tiny/image -p machine --kernel ami-fcbj2non --ramdisk ami-orukptrc - -You should see this in response:: - - Checking image - Tarring image - Encrypting image - Splitting image... - Part: machine.part.0 - Part: machine.part.1 - Part: machine.part.2 - Part: machine.part.3 - Part: machine.part.4 - Generating manifest /tmp/machine.manifest.xml - -Type or copy/paste the following commands to upload the machine image bundle:: - - euca-upload-bundle -m /tmp/machine.manifest.xml -b mybucket - -You should see this in response:: - - Checking bucket: mybucket - Uploading manifest file - Uploading part: machine.part.0 - Uploading part: machine.part.1 - Uploading part: machine.part.2 - Uploading part: machine.part.3 - Uploading part: machine.part.4 - Uploaded image as mybucket/machine.manifest.xml - -Type or copy/paste the following commands to register the machine image and get its ID:: - - euca-register mybucket/machine.manifest.xml - -You should see this in response:: - - IMAGE ami-g06qbntt - -Type or copy/paste the following commands to register an SSH keypair for use in starting and accessing the instances:: - - euca-add-keypair mykey > mykey.priv - chmod 600 mykey.priv - -Type or copy/paste the following commands to run an instance using the keypair and IDs that we previously created:: - - euca-run-instances ami-g06qbntt --kernel ami-fcbj2non --ramdisk ami-orukptrc -k mykey - -You should see this in response:: - - RESERVATION r-0at28z12 IRT - INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 scheduling mykey (IRT, None) m1.small 2010-10-18 19:02:10.443599 - -Type or copy/paste the following commands to watch as the scheduler launches and boots your instance:: - - euca-describe-instances - -You should see this in response:: - - RESERVATION r-0at28z12 IRT - INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 launching mykey (IRT, cloud02) m1.small 2010-10-18 19:02:10.443599 - -Type or copy/paste the following commands to see when loading is completed and the instance is running:: - - euca-describe-instances - -You should see this in response:: - - RESERVATION r-0at28z12 IRT - INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 running mykey (IRT, cloud02) 0 m1.small
2010-10-18 19:02:10.443599 - -Type or copy/paste the following commands to check that the virtual machine is running:: - - virsh list - -You should see this in response:: - - Id Name State - ---------------------------------- - 1 2842445831 running - -Type or copy/paste the following commands to SSH to the instance using your private key:: - - ssh -i mykey.priv root@10.0.0.3 - - -Troubleshooting Installation --------------------------- - -If you see an "error loading the config file './openssl.cnf'" message, copy the openssl.cnf file to the location where Nova expects it, reboot, and then try the command again. - -:: - - cp /etc/ssl/openssl.cnf ~ - sudo reboot - - - diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst new file mode 100644 index 000000000..329a465db --- /dev/null +++ b/doc/source/api/autoindex.rst @@ -0,0 +1,144 @@ +.. toctree:: + :maxdepth: 1 + + nova..adminclient.rst + nova..api.direct.rst + nova..api.ec2.admin.rst + nova..api.ec2.apirequest.rst + nova..api.ec2.cloud.rst + nova..api.ec2.metadatarequesthandler.rst + nova..api.openstack.auth.rst + nova..api.openstack.backup_schedules.rst + nova..api.openstack.common.rst + nova..api.openstack.consoles.rst + nova..api.openstack.faults.rst + nova..api.openstack.flavors.rst + nova..api.openstack.images.rst + nova..api.openstack.servers.rst + nova..api.openstack.shared_ip_groups.rst + nova..api.openstack.zones.rst + nova..auth.dbdriver.rst + nova..auth.fakeldap.rst + nova..auth.ldapdriver.rst + nova..auth.manager.rst + nova..auth.signer.rst + nova..cloudpipe.pipelib.rst + nova..compute.api.rst + nova..compute.instance_types.rst + nova..compute.manager.rst + nova..compute.monitor.rst + nova..compute.power_state.rst + nova..console.api.rst + nova..console.fake.rst + nova..console.manager.rst + nova..console.xvp.rst + nova..context.rst + nova..crypto.rst + nova..db.api.rst + nova..db.base.rst + nova..db.migration.rst + nova..db.sqlalchemy.api.rst + nova..db.sqlalchemy.migrate_repo.manage.rst + nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst + nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst + nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst + nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst + nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst + nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst + nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst + nova..db.sqlalchemy.migration.rst + nova..db.sqlalchemy.models.rst + nova..db.sqlalchemy.session.rst + nova..exception.rst + nova..fakememcache.rst + nova..fakerabbit.rst + nova..flags.rst + nova..image.glance.rst + nova..image.local.rst + nova..image.s3.rst + nova..image.service.rst + nova..log.rst + nova..manager.rst + nova..network.api.rst + nova..network.linux_net.rst + nova..network.manager.rst + nova..objectstore.bucket.rst + nova..objectstore.handler.rst + nova..objectstore.image.rst + nova..objectstore.stored.rst + nova..quota.rst + nova..rpc.rst + nova..scheduler.chance.rst + nova..scheduler.driver.rst + nova..scheduler.manager.rst + nova..scheduler.simple.rst + nova..scheduler.zone.rst + nova..service.rst + nova..test.rst + nova..tests.api.openstack.fakes.rst + nova..tests.api.openstack.test_adminapi.rst + nova..tests.api.openstack.test_api.rst + nova..tests.api.openstack.test_auth.rst + nova..tests.api.openstack.test_common.rst + nova..tests.api.openstack.test_faults.rst + nova..tests.api.openstack.test_flavors.rst +
nova..tests.api.openstack.test_images.rst + nova..tests.api.openstack.test_ratelimiting.rst + nova..tests.api.openstack.test_servers.rst + nova..tests.api.openstack.test_shared_ip_groups.rst + nova..tests.api.openstack.test_zones.rst + nova..tests.api.test_wsgi.rst + nova..tests.db.fakes.rst + nova..tests.declare_flags.rst + nova..tests.fake_flags.rst + nova..tests.glance.stubs.rst + nova..tests.hyperv_unittest.rst + nova..tests.objectstore_unittest.rst + nova..tests.real_flags.rst + nova..tests.runtime_flags.rst + nova..tests.test_access.rst + nova..tests.test_api.rst + nova..tests.test_auth.rst + nova..tests.test_cloud.rst + nova..tests.test_compute.rst + nova..tests.test_console.rst + nova..tests.test_direct.rst + nova..tests.test_flags.rst + nova..tests.test_instance_types.rst + nova..tests.test_localization.rst + nova..tests.test_log.rst + nova..tests.test_middleware.rst + nova..tests.test_misc.rst + nova..tests.test_network.rst + nova..tests.test_quota.rst + nova..tests.test_rpc.rst + nova..tests.test_scheduler.rst + nova..tests.test_service.rst + nova..tests.test_test.rst + nova..tests.test_twistd.rst + nova..tests.test_utils.rst + nova..tests.test_virt.rst + nova..tests.test_volume.rst + nova..tests.test_xenapi.rst + nova..tests.xenapi.stubs.rst + nova..twistd.rst + nova..utils.rst + nova..version.rst + nova..virt.connection.rst + nova..virt.disk.rst + nova..virt.fake.rst + nova..virt.hyperv.rst + nova..virt.images.rst + nova..virt.libvirt_conn.rst + nova..virt.xenapi.fake.rst + nova..virt.xenapi.network_utils.rst + nova..virt.xenapi.vm_utils.rst + nova..virt.xenapi.vmops.rst + nova..virt.xenapi.volume_utils.rst + nova..virt.xenapi.volumeops.rst + nova..virt.xenapi_conn.rst + nova..volume.api.rst + nova..volume.driver.rst + nova..volume.manager.rst + nova..volume.san.rst + nova..wsgi.rst diff --git a/doc/source/api/nova..adminclient.rst b/doc/source/api/nova..adminclient.rst new file mode 100644 index 000000000..35fa839e1 --- /dev/null +++ b/doc/source/api/nova..adminclient.rst @@ -0,0 +1,6 @@ +The :mod:`nova..adminclient` Module +============================================================================== +.. automodule:: nova..adminclient + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.direct.rst b/doc/source/api/nova..api.direct.rst new file mode 100644 index 000000000..a1705c707 --- /dev/null +++ b/doc/source/api/nova..api.direct.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.direct` Module +============================================================================== +.. automodule:: nova..api.direct + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.admin.rst b/doc/source/api/nova..api.ec2.admin.rst new file mode 100644 index 000000000..4e9ab308b --- /dev/null +++ b/doc/source/api/nova..api.ec2.admin.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.ec2.admin` Module +============================================================================== +.. automodule:: nova..api.ec2.admin + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.apirequest.rst b/doc/source/api/nova..api.ec2.apirequest.rst new file mode 100644 index 000000000..c17a2ff3a --- /dev/null +++ b/doc/source/api/nova..api.ec2.apirequest.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.ec2.apirequest` Module +============================================================================== +.. 
automodule:: nova..api.ec2.apirequest + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.cloud.rst b/doc/source/api/nova..api.ec2.cloud.rst new file mode 100644 index 000000000..f6145c217 --- /dev/null +++ b/doc/source/api/nova..api.ec2.cloud.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.ec2.cloud` Module +============================================================================== +.. automodule:: nova..api.ec2.cloud + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.ec2.metadatarequesthandler.rst b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst new file mode 100644 index 000000000..75f5169e5 --- /dev/null +++ b/doc/source/api/nova..api.ec2.metadatarequesthandler.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.ec2.metadatarequesthandler` Module +============================================================================== +.. automodule:: nova..api.ec2.metadatarequesthandler + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.auth.rst b/doc/source/api/nova..api.openstack.auth.rst new file mode 100644 index 000000000..8c3f8f2da --- /dev/null +++ b/doc/source/api/nova..api.openstack.auth.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.auth` Module +============================================================================== +.. automodule:: nova..api.openstack.auth + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.backup_schedules.rst b/doc/source/api/nova..api.openstack.backup_schedules.rst new file mode 100644 index 000000000..6b406f12d --- /dev/null +++ b/doc/source/api/nova..api.openstack.backup_schedules.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.backup_schedules` Module +============================================================================== +.. automodule:: nova..api.openstack.backup_schedules + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.common.rst b/doc/source/api/nova..api.openstack.common.rst new file mode 100644 index 000000000..4fd734790 --- /dev/null +++ b/doc/source/api/nova..api.openstack.common.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.common` Module +============================================================================== +.. automodule:: nova..api.openstack.common + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.consoles.rst b/doc/source/api/nova..api.openstack.consoles.rst new file mode 100644 index 000000000..1e3e09599 --- /dev/null +++ b/doc/source/api/nova..api.openstack.consoles.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.consoles` Module +============================================================================== +.. automodule:: nova..api.openstack.consoles + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.faults.rst b/doc/source/api/nova..api.openstack.faults.rst new file mode 100644 index 000000000..7b25561f7 --- /dev/null +++ b/doc/source/api/nova..api.openstack.faults.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.faults` Module +============================================================================== +.. 
automodule:: nova..api.openstack.faults + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.flavors.rst b/doc/source/api/nova..api.openstack.flavors.rst new file mode 100644 index 000000000..0deb724de --- /dev/null +++ b/doc/source/api/nova..api.openstack.flavors.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.flavors` Module +============================================================================== +.. automodule:: nova..api.openstack.flavors + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.images.rst b/doc/source/api/nova..api.openstack.images.rst new file mode 100644 index 000000000..82bd5f1e8 --- /dev/null +++ b/doc/source/api/nova..api.openstack.images.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.images` Module +============================================================================== +.. automodule:: nova..api.openstack.images + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.servers.rst b/doc/source/api/nova..api.openstack.servers.rst new file mode 100644 index 000000000..c36856ea2 --- /dev/null +++ b/doc/source/api/nova..api.openstack.servers.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.servers` Module +============================================================================== +.. automodule:: nova..api.openstack.servers + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.shared_ip_groups.rst b/doc/source/api/nova..api.openstack.shared_ip_groups.rst new file mode 100644 index 000000000..4b1f44efe --- /dev/null +++ b/doc/source/api/nova..api.openstack.shared_ip_groups.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.shared_ip_groups` Module +============================================================================== +.. automodule:: nova..api.openstack.shared_ip_groups + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..api.openstack.zones.rst b/doc/source/api/nova..api.openstack.zones.rst new file mode 100644 index 000000000..ebe4569c5 --- /dev/null +++ b/doc/source/api/nova..api.openstack.zones.rst @@ -0,0 +1,6 @@ +The :mod:`nova..api.openstack.zones` Module +============================================================================== +.. automodule:: nova..api.openstack.zones + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..auth.dbdriver.rst b/doc/source/api/nova..auth.dbdriver.rst new file mode 100644 index 000000000..7de68b6e0 --- /dev/null +++ b/doc/source/api/nova..auth.dbdriver.rst @@ -0,0 +1,6 @@ +The :mod:`nova..auth.dbdriver` Module +============================================================================== +.. automodule:: nova..auth.dbdriver + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..auth.fakeldap.rst b/doc/source/api/nova..auth.fakeldap.rst new file mode 100644 index 000000000..ca8a3ad4d --- /dev/null +++ b/doc/source/api/nova..auth.fakeldap.rst @@ -0,0 +1,6 @@ +The :mod:`nova..auth.fakeldap` Module +============================================================================== +.. 
automodule:: nova..auth.fakeldap + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..auth.ldapdriver.rst b/doc/source/api/nova..auth.ldapdriver.rst new file mode 100644 index 000000000..c44463522 --- /dev/null +++ b/doc/source/api/nova..auth.ldapdriver.rst @@ -0,0 +1,6 @@ +The :mod:`nova..auth.ldapdriver` Module +============================================================================== +.. automodule:: nova..auth.ldapdriver + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..auth.manager.rst b/doc/source/api/nova..auth.manager.rst new file mode 100644 index 000000000..bc5ce2ec3 --- /dev/null +++ b/doc/source/api/nova..auth.manager.rst @@ -0,0 +1,6 @@ +The :mod:`nova..auth.manager` Module +============================================================================== +.. automodule:: nova..auth.manager + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..auth.signer.rst b/doc/source/api/nova..auth.signer.rst new file mode 100644 index 000000000..aad824ead --- /dev/null +++ b/doc/source/api/nova..auth.signer.rst @@ -0,0 +1,6 @@ +The :mod:`nova..auth.signer` Module +============================================================================== +.. automodule:: nova..auth.signer + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..cloudpipe.pipelib.rst b/doc/source/api/nova..cloudpipe.pipelib.rst new file mode 100644 index 000000000..054aaf484 --- /dev/null +++ b/doc/source/api/nova..cloudpipe.pipelib.rst @@ -0,0 +1,6 @@ +The :mod:`nova..cloudpipe.pipelib` Module +============================================================================== +.. automodule:: nova..cloudpipe.pipelib + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..compute.api.rst b/doc/source/api/nova..compute.api.rst new file mode 100644 index 000000000..caa66313a --- /dev/null +++ b/doc/source/api/nova..compute.api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..compute.api` Module +============================================================================== +.. automodule:: nova..compute.api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..compute.instance_types.rst b/doc/source/api/nova..compute.instance_types.rst new file mode 100644 index 000000000..d206ff3a4 --- /dev/null +++ b/doc/source/api/nova..compute.instance_types.rst @@ -0,0 +1,6 @@ +The :mod:`nova..compute.instance_types` Module +============================================================================== +.. automodule:: nova..compute.instance_types + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..compute.manager.rst b/doc/source/api/nova..compute.manager.rst new file mode 100644 index 000000000..33a337c39 --- /dev/null +++ b/doc/source/api/nova..compute.manager.rst @@ -0,0 +1,6 @@ +The :mod:`nova..compute.manager` Module +============================================================================== +.. automodule:: nova..compute.manager + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst new file mode 100644 index 000000000..a91169ecd --- /dev/null +++ b/doc/source/api/nova..compute.monitor.rst @@ -0,0 +1,6 @@ +The :mod:`nova..compute.monitor` Module +============================================================================== +.. 
automodule:: nova..compute.monitor + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..compute.power_state.rst b/doc/source/api/nova..compute.power_state.rst new file mode 100644 index 000000000..41b1080e5 --- /dev/null +++ b/doc/source/api/nova..compute.power_state.rst @@ -0,0 +1,6 @@ +The :mod:`nova..compute.power_state` Module +============================================================================== +.. automodule:: nova..compute.power_state + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..console.api.rst b/doc/source/api/nova..console.api.rst new file mode 100644 index 000000000..82a51d4c7 --- /dev/null +++ b/doc/source/api/nova..console.api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..console.api` Module +============================================================================== +.. automodule:: nova..console.api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..console.fake.rst b/doc/source/api/nova..console.fake.rst new file mode 100644 index 000000000..f053f85d6 --- /dev/null +++ b/doc/source/api/nova..console.fake.rst @@ -0,0 +1,6 @@ +The :mod:`nova..console.fake` Module +============================================================================== +.. automodule:: nova..console.fake + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..console.manager.rst b/doc/source/api/nova..console.manager.rst new file mode 100644 index 000000000..f9283a6c3 --- /dev/null +++ b/doc/source/api/nova..console.manager.rst @@ -0,0 +1,6 @@ +The :mod:`nova..console.manager` Module +============================================================================== +.. automodule:: nova..console.manager + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..console.xvp.rst b/doc/source/api/nova..console.xvp.rst new file mode 100644 index 000000000..a0887009e --- /dev/null +++ b/doc/source/api/nova..console.xvp.rst @@ -0,0 +1,6 @@ +The :mod:`nova..console.xvp` Module +============================================================================== +.. automodule:: nova..console.xvp + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..context.rst b/doc/source/api/nova..context.rst new file mode 100644 index 000000000..9de1adb24 --- /dev/null +++ b/doc/source/api/nova..context.rst @@ -0,0 +1,6 @@ +The :mod:`nova..context` Module +============================================================================== +.. automodule:: nova..context + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..crypto.rst b/doc/source/api/nova..crypto.rst new file mode 100644 index 000000000..af9f63634 --- /dev/null +++ b/doc/source/api/nova..crypto.rst @@ -0,0 +1,6 @@ +The :mod:`nova..crypto` Module +============================================================================== +.. automodule:: nova..crypto + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.api.rst b/doc/source/api/nova..db.api.rst new file mode 100644 index 000000000..6d998fbb2 --- /dev/null +++ b/doc/source/api/nova..db.api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.api` Module +============================================================================== +.. 
automodule:: nova..db.api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.base.rst b/doc/source/api/nova..db.base.rst new file mode 100644 index 000000000..29fb417d6 --- /dev/null +++ b/doc/source/api/nova..db.base.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.base` Module +============================================================================== +.. automodule:: nova..db.base + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.migration.rst b/doc/source/api/nova..db.migration.rst new file mode 100644 index 000000000..71dfea301 --- /dev/null +++ b/doc/source/api/nova..db.migration.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.migration` Module +============================================================================== +.. automodule:: nova..db.migration + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.api.rst b/doc/source/api/nova..db.sqlalchemy.api.rst new file mode 100644 index 000000000..76d0c1bd3 --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.api` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst new file mode 100644 index 000000000..93decfb27 --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.manage.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.manage` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.migrate_repo.manage + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst new file mode 100644 index 000000000..4b1219edb --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.versions.001_austin` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.001_austin + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst new file mode 100644 index 000000000..82f1f4680 --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.versions.002_bexar` Module +============================================================================== +.. 
automodule:: nova..db.sqlalchemy.migrate_repo.versions.002_bexar + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst new file mode 100644 index 000000000..98f3e8da7 --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst new file mode 100644 index 000000000..5cbb81191 --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst new file mode 100644 index 000000000..cef0c243e --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst new file mode 100644 index 000000000..a15697196 --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst new file mode 100644 index 000000000..38842d1af --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types` Module +============================================================================== +.. 
automodule:: nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.migration.rst b/doc/source/api/nova..db.sqlalchemy.migration.rst new file mode 100644 index 000000000..3a9b01b9a --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.migration.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.migration` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.migration + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.models.rst b/doc/source/api/nova..db.sqlalchemy.models.rst new file mode 100644 index 000000000..9c795d7f5 --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.models.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.models` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.models + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..db.sqlalchemy.session.rst b/doc/source/api/nova..db.sqlalchemy.session.rst new file mode 100644 index 000000000..cbfd6416a --- /dev/null +++ b/doc/source/api/nova..db.sqlalchemy.session.rst @@ -0,0 +1,6 @@ +The :mod:`nova..db.sqlalchemy.session` Module +============================================================================== +.. automodule:: nova..db.sqlalchemy.session + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..exception.rst b/doc/source/api/nova..exception.rst new file mode 100644 index 000000000..97ac6b752 --- /dev/null +++ b/doc/source/api/nova..exception.rst @@ -0,0 +1,6 @@ +The :mod:`nova..exception` Module +============================================================================== +.. automodule:: nova..exception + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..fakememcache.rst b/doc/source/api/nova..fakememcache.rst new file mode 100644 index 000000000..7e7ffb98b --- /dev/null +++ b/doc/source/api/nova..fakememcache.rst @@ -0,0 +1,6 @@ +The :mod:`nova..fakememcache` Module +============================================================================== +.. automodule:: nova..fakememcache + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..fakerabbit.rst b/doc/source/api/nova..fakerabbit.rst new file mode 100644 index 000000000..f1e27c266 --- /dev/null +++ b/doc/source/api/nova..fakerabbit.rst @@ -0,0 +1,6 @@ +The :mod:`nova..fakerabbit` Module +============================================================================== +.. automodule:: nova..fakerabbit + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..flags.rst b/doc/source/api/nova..flags.rst new file mode 100644 index 000000000..08165be44 --- /dev/null +++ b/doc/source/api/nova..flags.rst @@ -0,0 +1,6 @@ +The :mod:`nova..flags` Module +============================================================================== +.. automodule:: nova..flags + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..image.glance.rst b/doc/source/api/nova..image.glance.rst new file mode 100644 index 000000000..b0882d5ec --- /dev/null +++ b/doc/source/api/nova..image.glance.rst @@ -0,0 +1,6 @@ +The :mod:`nova..image.glance` Module +============================================================================== +.. 
automodule:: nova..image.glance + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..image.local.rst b/doc/source/api/nova..image.local.rst new file mode 100644 index 000000000..b6ad5470b --- /dev/null +++ b/doc/source/api/nova..image.local.rst @@ -0,0 +1,6 @@ +The :mod:`nova..image.local` Module +============================================================================== +.. automodule:: nova..image.local + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..image.s3.rst b/doc/source/api/nova..image.s3.rst new file mode 100644 index 000000000..e5b236127 --- /dev/null +++ b/doc/source/api/nova..image.s3.rst @@ -0,0 +1,6 @@ +The :mod:`nova..image.s3` Module +============================================================================== +.. automodule:: nova..image.s3 + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..image.service.rst b/doc/source/api/nova..image.service.rst new file mode 100644 index 000000000..78ef1ecca --- /dev/null +++ b/doc/source/api/nova..image.service.rst @@ -0,0 +1,6 @@ +The :mod:`nova..image.service` Module +============================================================================== +.. automodule:: nova..image.service + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..log.rst b/doc/source/api/nova..log.rst new file mode 100644 index 000000000..ff209709f --- /dev/null +++ b/doc/source/api/nova..log.rst @@ -0,0 +1,6 @@ +The :mod:`nova..log` Module +============================================================================== +.. automodule:: nova..log + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..manager.rst b/doc/source/api/nova..manager.rst new file mode 100644 index 000000000..576902491 --- /dev/null +++ b/doc/source/api/nova..manager.rst @@ -0,0 +1,6 @@ +The :mod:`nova..manager` Module +============================================================================== +.. automodule:: nova..manager + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..network.api.rst b/doc/source/api/nova..network.api.rst new file mode 100644 index 000000000..b63be2ba3 --- /dev/null +++ b/doc/source/api/nova..network.api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..network.api` Module +============================================================================== +.. automodule:: nova..network.api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..network.linux_net.rst b/doc/source/api/nova..network.linux_net.rst new file mode 100644 index 000000000..7af78d5ad --- /dev/null +++ b/doc/source/api/nova..network.linux_net.rst @@ -0,0 +1,6 @@ +The :mod:`nova..network.linux_net` Module +============================================================================== +.. automodule:: nova..network.linux_net + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..network.manager.rst b/doc/source/api/nova..network.manager.rst new file mode 100644 index 000000000..0ea705533 --- /dev/null +++ b/doc/source/api/nova..network.manager.rst @@ -0,0 +1,6 @@ +The :mod:`nova..network.manager` Module +============================================================================== +.. 
automodule:: nova..network.manager + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..objectstore.bucket.rst b/doc/source/api/nova..objectstore.bucket.rst new file mode 100644 index 000000000..3bfdf639c --- /dev/null +++ b/doc/source/api/nova..objectstore.bucket.rst @@ -0,0 +1,6 @@ +The :mod:`nova..objectstore.bucket` Module +============================================================================== +.. automodule:: nova..objectstore.bucket + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..objectstore.handler.rst b/doc/source/api/nova..objectstore.handler.rst new file mode 100644 index 000000000..0eb8c4efb --- /dev/null +++ b/doc/source/api/nova..objectstore.handler.rst @@ -0,0 +1,6 @@ +The :mod:`nova..objectstore.handler` Module +============================================================================== +.. automodule:: nova..objectstore.handler + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..objectstore.image.rst b/doc/source/api/nova..objectstore.image.rst new file mode 100644 index 000000000..fa4c971f1 --- /dev/null +++ b/doc/source/api/nova..objectstore.image.rst @@ -0,0 +1,6 @@ +The :mod:`nova..objectstore.image` Module +============================================================================== +.. automodule:: nova..objectstore.image + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..objectstore.stored.rst b/doc/source/api/nova..objectstore.stored.rst new file mode 100644 index 000000000..2b1d997a3 --- /dev/null +++ b/doc/source/api/nova..objectstore.stored.rst @@ -0,0 +1,6 @@ +The :mod:`nova..objectstore.stored` Module +============================================================================== +.. automodule:: nova..objectstore.stored + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..quota.rst b/doc/source/api/nova..quota.rst new file mode 100644 index 000000000..4140d95d6 --- /dev/null +++ b/doc/source/api/nova..quota.rst @@ -0,0 +1,6 @@ +The :mod:`nova..quota` Module +============================================================================== +.. automodule:: nova..quota + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..rpc.rst b/doc/source/api/nova..rpc.rst new file mode 100644 index 000000000..5b2a9b8e2 --- /dev/null +++ b/doc/source/api/nova..rpc.rst @@ -0,0 +1,6 @@ +The :mod:`nova..rpc` Module +============================================================================== +.. automodule:: nova..rpc + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..scheduler.chance.rst b/doc/source/api/nova..scheduler.chance.rst new file mode 100644 index 000000000..89c074c8f --- /dev/null +++ b/doc/source/api/nova..scheduler.chance.rst @@ -0,0 +1,6 @@ +The :mod:`nova..scheduler.chance` Module +============================================================================== +.. automodule:: nova..scheduler.chance + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..scheduler.driver.rst b/doc/source/api/nova..scheduler.driver.rst new file mode 100644 index 000000000..793ed9c7b --- /dev/null +++ b/doc/source/api/nova..scheduler.driver.rst @@ -0,0 +1,6 @@ +The :mod:`nova..scheduler.driver` Module +============================================================================== +.. 
automodule:: nova..scheduler.driver + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..scheduler.manager.rst b/doc/source/api/nova..scheduler.manager.rst new file mode 100644 index 000000000..d0fc7c423 --- /dev/null +++ b/doc/source/api/nova..scheduler.manager.rst @@ -0,0 +1,6 @@ +The :mod:`nova..scheduler.manager` Module +============================================================================== +.. automodule:: nova..scheduler.manager + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..scheduler.simple.rst b/doc/source/api/nova..scheduler.simple.rst new file mode 100644 index 000000000..dacc2cf30 --- /dev/null +++ b/doc/source/api/nova..scheduler.simple.rst @@ -0,0 +1,6 @@ +The :mod:`nova..scheduler.simple` Module +============================================================================== +.. automodule:: nova..scheduler.simple + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..scheduler.zone.rst b/doc/source/api/nova..scheduler.zone.rst new file mode 100644 index 000000000..54c4bf201 --- /dev/null +++ b/doc/source/api/nova..scheduler.zone.rst @@ -0,0 +1,6 @@ +The :mod:`nova..scheduler.zone` Module +============================================================================== +.. automodule:: nova..scheduler.zone + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..service.rst b/doc/source/api/nova..service.rst new file mode 100644 index 000000000..2d2dfcf2e --- /dev/null +++ b/doc/source/api/nova..service.rst @@ -0,0 +1,6 @@ +The :mod:`nova..service` Module +============================================================================== +.. automodule:: nova..service + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..test.rst b/doc/source/api/nova..test.rst new file mode 100644 index 000000000..a6bdb6f1f --- /dev/null +++ b/doc/source/api/nova..test.rst @@ -0,0 +1,6 @@ +The :mod:`nova..test` Module +============================================================================== +.. automodule:: nova..test + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.fakes.rst b/doc/source/api/nova..tests.api.openstack.fakes.rst new file mode 100644 index 000000000..4a9ff5938 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.fakes.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.fakes` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.fakes + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_adminapi.rst b/doc/source/api/nova..tests.api.openstack.test_adminapi.rst new file mode 100644 index 000000000..19a85ca0f --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_adminapi.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_adminapi` Module +============================================================================== +.. 
automodule:: nova..tests.api.openstack.test_adminapi + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_api.rst b/doc/source/api/nova..tests.api.openstack.test_api.rst new file mode 100644 index 000000000..68106d221 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_api` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_auth.rst b/doc/source/api/nova..tests.api.openstack.test_auth.rst new file mode 100644 index 000000000..9f0011669 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_auth.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_auth` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_auth + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_common.rst b/doc/source/api/nova..tests.api.openstack.test_common.rst new file mode 100644 index 000000000..82f40ecb8 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_common.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_common` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_common + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_faults.rst b/doc/source/api/nova..tests.api.openstack.test_faults.rst new file mode 100644 index 000000000..b839ae8a3 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_faults.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_faults` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_faults + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_flavors.rst b/doc/source/api/nova..tests.api.openstack.test_flavors.rst new file mode 100644 index 000000000..471fac56e --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_flavors.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_flavors` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_flavors + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_images.rst b/doc/source/api/nova..tests.api.openstack.test_images.rst new file mode 100644 index 000000000..57ae93c8c --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_images.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_images` Module +============================================================================== +.. 
automodule:: nova..tests.api.openstack.test_images + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst new file mode 100644 index 000000000..9a857f795 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_ratelimiting.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_ratelimiting` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_ratelimiting + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_servers.rst b/doc/source/api/nova..tests.api.openstack.test_servers.rst new file mode 100644 index 000000000..ea602e6ab --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_servers.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_servers` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_servers + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst b/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst new file mode 100644 index 000000000..48814af00 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_shared_ip_groups.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_shared_ip_groups` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_shared_ip_groups + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.openstack.test_zones.rst b/doc/source/api/nova..tests.api.openstack.test_zones.rst new file mode 100644 index 000000000..ba7078e63 --- /dev/null +++ b/doc/source/api/nova..tests.api.openstack.test_zones.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.openstack.test_zones` Module +============================================================================== +.. automodule:: nova..tests.api.openstack.test_zones + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.api.test_wsgi.rst b/doc/source/api/nova..tests.api.test_wsgi.rst new file mode 100644 index 000000000..8e79caa4d --- /dev/null +++ b/doc/source/api/nova..tests.api.test_wsgi.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.api.test_wsgi` Module +============================================================================== +.. automodule:: nova..tests.api.test_wsgi + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.db.fakes.rst b/doc/source/api/nova..tests.db.fakes.rst new file mode 100644 index 000000000..cc79e55e2 --- /dev/null +++ b/doc/source/api/nova..tests.db.fakes.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.db.fakes` Module +============================================================================== +.. automodule:: nova..tests.db.fakes + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.declare_flags.rst b/doc/source/api/nova..tests.declare_flags.rst new file mode 100644 index 000000000..524e72e91 --- /dev/null +++ b/doc/source/api/nova..tests.declare_flags.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.declare_flags` Module +============================================================================== +.. 
automodule:: nova..tests.declare_flags + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.fake_flags.rst b/doc/source/api/nova..tests.fake_flags.rst new file mode 100644 index 000000000..a8dc3df36 --- /dev/null +++ b/doc/source/api/nova..tests.fake_flags.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.fake_flags` Module +============================================================================== +.. automodule:: nova..tests.fake_flags + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.glance.stubs.rst b/doc/source/api/nova..tests.glance.stubs.rst new file mode 100644 index 000000000..7ef5fccbe --- /dev/null +++ b/doc/source/api/nova..tests.glance.stubs.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.glance.stubs` Module +============================================================================== +.. automodule:: nova..tests.glance.stubs + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.hyperv_unittest.rst b/doc/source/api/nova..tests.hyperv_unittest.rst new file mode 100644 index 000000000..c08443121 --- /dev/null +++ b/doc/source/api/nova..tests.hyperv_unittest.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.hyperv_unittest` Module +============================================================================== +.. automodule:: nova..tests.hyperv_unittest + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.objectstore_unittest.rst b/doc/source/api/nova..tests.objectstore_unittest.rst new file mode 100644 index 000000000..0ae252f04 --- /dev/null +++ b/doc/source/api/nova..tests.objectstore_unittest.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.objectstore_unittest` Module +============================================================================== +.. automodule:: nova..tests.objectstore_unittest + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.real_flags.rst b/doc/source/api/nova..tests.real_flags.rst new file mode 100644 index 000000000..e9c0d1abd --- /dev/null +++ b/doc/source/api/nova..tests.real_flags.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.real_flags` Module +============================================================================== +.. automodule:: nova..tests.real_flags + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.runtime_flags.rst b/doc/source/api/nova..tests.runtime_flags.rst new file mode 100644 index 000000000..984e21199 --- /dev/null +++ b/doc/source/api/nova..tests.runtime_flags.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.runtime_flags` Module +============================================================================== +.. automodule:: nova..tests.runtime_flags + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_access.rst b/doc/source/api/nova..tests.test_access.rst new file mode 100644 index 000000000..300d8109e --- /dev/null +++ b/doc/source/api/nova..tests.test_access.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_access` Module +============================================================================== +.. 
automodule:: nova..tests.test_access + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_api.rst b/doc/source/api/nova..tests.test_api.rst new file mode 100644 index 000000000..f9473062e --- /dev/null +++ b/doc/source/api/nova..tests.test_api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_api` Module +============================================================================== +.. automodule:: nova..tests.test_api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_auth.rst b/doc/source/api/nova..tests.test_auth.rst new file mode 100644 index 000000000..ff4445ae4 --- /dev/null +++ b/doc/source/api/nova..tests.test_auth.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_auth` Module +============================================================================== +.. automodule:: nova..tests.test_auth + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_cloud.rst b/doc/source/api/nova..tests.test_cloud.rst new file mode 100644 index 000000000..7bd03db9a --- /dev/null +++ b/doc/source/api/nova..tests.test_cloud.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_cloud` Module +============================================================================== +.. automodule:: nova..tests.test_cloud + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_compute.rst b/doc/source/api/nova..tests.test_compute.rst new file mode 100644 index 000000000..90fd6e9d1 --- /dev/null +++ b/doc/source/api/nova..tests.test_compute.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_compute` Module +============================================================================== +.. automodule:: nova..tests.test_compute + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_console.rst b/doc/source/api/nova..tests.test_console.rst new file mode 100644 index 000000000..f695f5d17 --- /dev/null +++ b/doc/source/api/nova..tests.test_console.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_console` Module +============================================================================== +.. automodule:: nova..tests.test_console + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_direct.rst b/doc/source/api/nova..tests.test_direct.rst new file mode 100644 index 000000000..4f7adef19 --- /dev/null +++ b/doc/source/api/nova..tests.test_direct.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_direct` Module +============================================================================== +.. automodule:: nova..tests.test_direct + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_flags.rst b/doc/source/api/nova..tests.test_flags.rst new file mode 100644 index 000000000..2ec35d6c2 --- /dev/null +++ b/doc/source/api/nova..tests.test_flags.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_flags` Module +============================================================================== +.. automodule:: nova..tests.test_flags + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_instance_types.rst b/doc/source/api/nova..tests.test_instance_types.rst new file mode 100644 index 000000000..ebe689966 --- /dev/null +++ b/doc/source/api/nova..tests.test_instance_types.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_instance_types` Module +============================================================================== +.. 
automodule:: nova..tests.test_instance_types + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_localization.rst b/doc/source/api/nova..tests.test_localization.rst new file mode 100644 index 000000000..d93c83ba7 --- /dev/null +++ b/doc/source/api/nova..tests.test_localization.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_localization` Module +============================================================================== +.. automodule:: nova..tests.test_localization + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_log.rst b/doc/source/api/nova..tests.test_log.rst new file mode 100644 index 000000000..04ff5ead1 --- /dev/null +++ b/doc/source/api/nova..tests.test_log.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_log` Module +============================================================================== +.. automodule:: nova..tests.test_log + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_middleware.rst b/doc/source/api/nova..tests.test_middleware.rst new file mode 100644 index 000000000..2f9df5832 --- /dev/null +++ b/doc/source/api/nova..tests.test_middleware.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_middleware` Module +============================================================================== +.. automodule:: nova..tests.test_middleware + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_misc.rst b/doc/source/api/nova..tests.test_misc.rst new file mode 100644 index 000000000..4975f89d7 --- /dev/null +++ b/doc/source/api/nova..tests.test_misc.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_misc` Module +============================================================================== +.. automodule:: nova..tests.test_misc + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_network.rst b/doc/source/api/nova..tests.test_network.rst new file mode 100644 index 000000000..3a4b04ea4 --- /dev/null +++ b/doc/source/api/nova..tests.test_network.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_network` Module +============================================================================== +.. automodule:: nova..tests.test_network + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_quota.rst b/doc/source/api/nova..tests.test_quota.rst new file mode 100644 index 000000000..24ebf9ca3 --- /dev/null +++ b/doc/source/api/nova..tests.test_quota.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_quota` Module +============================================================================== +.. automodule:: nova..tests.test_quota + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_rpc.rst b/doc/source/api/nova..tests.test_rpc.rst new file mode 100644 index 000000000..c141d6889 --- /dev/null +++ b/doc/source/api/nova..tests.test_rpc.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_rpc` Module +============================================================================== +.. 
automodule:: nova..tests.test_rpc + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_scheduler.rst b/doc/source/api/nova..tests.test_scheduler.rst new file mode 100644 index 000000000..1cd9991db --- /dev/null +++ b/doc/source/api/nova..tests.test_scheduler.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_scheduler` Module +============================================================================== +.. automodule:: nova..tests.test_scheduler + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_service.rst b/doc/source/api/nova..tests.test_service.rst new file mode 100644 index 000000000..a264fbb55 --- /dev/null +++ b/doc/source/api/nova..tests.test_service.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_service` Module +============================================================================== +.. automodule:: nova..tests.test_service + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_test.rst b/doc/source/api/nova..tests.test_test.rst new file mode 100644 index 000000000..389eb3c99 --- /dev/null +++ b/doc/source/api/nova..tests.test_test.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_test` Module +============================================================================== +.. automodule:: nova..tests.test_test + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_twistd.rst b/doc/source/api/nova..tests.test_twistd.rst new file mode 100644 index 000000000..cae0c0a28 --- /dev/null +++ b/doc/source/api/nova..tests.test_twistd.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_twistd` Module +============================================================================== +.. automodule:: nova..tests.test_twistd + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_utils.rst b/doc/source/api/nova..tests.test_utils.rst new file mode 100644 index 000000000..d61a7021f --- /dev/null +++ b/doc/source/api/nova..tests.test_utils.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_utils` Module +============================================================================== +.. automodule:: nova..tests.test_utils + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_virt.rst b/doc/source/api/nova..tests.test_virt.rst new file mode 100644 index 000000000..9b0dc1e46 --- /dev/null +++ b/doc/source/api/nova..tests.test_virt.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_virt` Module +============================================================================== +.. automodule:: nova..tests.test_virt + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_volume.rst b/doc/source/api/nova..tests.test_volume.rst new file mode 100644 index 000000000..b5affe53c --- /dev/null +++ b/doc/source/api/nova..tests.test_volume.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_volume` Module +============================================================================== +.. automodule:: nova..tests.test_volume + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.test_xenapi.rst b/doc/source/api/nova..tests.test_xenapi.rst new file mode 100644 index 000000000..7128baee4 --- /dev/null +++ b/doc/source/api/nova..tests.test_xenapi.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.test_xenapi` Module +============================================================================== +.. 
automodule:: nova..tests.test_xenapi + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..tests.xenapi.stubs.rst b/doc/source/api/nova..tests.xenapi.stubs.rst new file mode 100644 index 000000000..356eed9a7 --- /dev/null +++ b/doc/source/api/nova..tests.xenapi.stubs.rst @@ -0,0 +1,6 @@ +The :mod:`nova..tests.xenapi.stubs` Module +============================================================================== +.. automodule:: nova..tests.xenapi.stubs + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst new file mode 100644 index 000000000..d4145396d --- /dev/null +++ b/doc/source/api/nova..twistd.rst @@ -0,0 +1,6 @@ +The :mod:`nova..twistd` Module +============================================================================== +.. automodule:: nova..twistd + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..utils.rst b/doc/source/api/nova..utils.rst new file mode 100644 index 000000000..1131d1080 --- /dev/null +++ b/doc/source/api/nova..utils.rst @@ -0,0 +1,6 @@ +The :mod:`nova..utils` Module +============================================================================== +.. automodule:: nova..utils + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..version.rst b/doc/source/api/nova..version.rst new file mode 100644 index 000000000..4b0fc078f --- /dev/null +++ b/doc/source/api/nova..version.rst @@ -0,0 +1,6 @@ +The :mod:`nova..version` Module +============================================================================== +.. automodule:: nova..version + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.connection.rst b/doc/source/api/nova..virt.connection.rst new file mode 100644 index 000000000..caf766765 --- /dev/null +++ b/doc/source/api/nova..virt.connection.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.connection` Module +============================================================================== +.. automodule:: nova..virt.connection + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.disk.rst b/doc/source/api/nova..virt.disk.rst new file mode 100644 index 000000000..4a6c0f406 --- /dev/null +++ b/doc/source/api/nova..virt.disk.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.disk` Module +============================================================================== +.. automodule:: nova..virt.disk + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.fake.rst b/doc/source/api/nova..virt.fake.rst new file mode 100644 index 000000000..06ecdbf7d --- /dev/null +++ b/doc/source/api/nova..virt.fake.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.fake` Module +============================================================================== +.. automodule:: nova..virt.fake + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.hyperv.rst b/doc/source/api/nova..virt.hyperv.rst new file mode 100644 index 000000000..48d89378e --- /dev/null +++ b/doc/source/api/nova..virt.hyperv.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.hyperv` Module +============================================================================== +.. 
automodule:: nova..virt.hyperv + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.images.rst b/doc/source/api/nova..virt.images.rst new file mode 100644 index 000000000..4fdeb7af8 --- /dev/null +++ b/doc/source/api/nova..virt.images.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.images` Module +============================================================================== +.. automodule:: nova..virt.images + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.libvirt_conn.rst b/doc/source/api/nova..virt.libvirt_conn.rst new file mode 100644 index 000000000..7fb8aed5f --- /dev/null +++ b/doc/source/api/nova..virt.libvirt_conn.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.libvirt_conn` Module +============================================================================== +.. automodule:: nova..virt.libvirt_conn + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.fake.rst b/doc/source/api/nova..virt.xenapi.fake.rst new file mode 100644 index 000000000..752dabb14 --- /dev/null +++ b/doc/source/api/nova..virt.xenapi.fake.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.xenapi.fake` Module +============================================================================== +.. automodule:: nova..virt.xenapi.fake + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.network_utils.rst b/doc/source/api/nova..virt.xenapi.network_utils.rst new file mode 100644 index 000000000..15f52973e --- /dev/null +++ b/doc/source/api/nova..virt.xenapi.network_utils.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.xenapi.network_utils` Module +============================================================================== +.. automodule:: nova..virt.xenapi.network_utils + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.vm_utils.rst b/doc/source/api/nova..virt.xenapi.vm_utils.rst new file mode 100644 index 000000000..18745dc71 --- /dev/null +++ b/doc/source/api/nova..virt.xenapi.vm_utils.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.xenapi.vm_utils` Module +============================================================================== +.. automodule:: nova..virt.xenapi.vm_utils + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.vmops.rst b/doc/source/api/nova..virt.xenapi.vmops.rst new file mode 100644 index 000000000..30662c58d --- /dev/null +++ b/doc/source/api/nova..virt.xenapi.vmops.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.xenapi.vmops` Module +============================================================================== +.. automodule:: nova..virt.xenapi.vmops + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.volume_utils.rst b/doc/source/api/nova..virt.xenapi.volume_utils.rst new file mode 100644 index 000000000..413e4dc4b --- /dev/null +++ b/doc/source/api/nova..virt.xenapi.volume_utils.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.xenapi.volume_utils` Module +============================================================================== +.. 
automodule:: nova..virt.xenapi.volume_utils + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi.volumeops.rst b/doc/source/api/nova..virt.xenapi.volumeops.rst new file mode 100644 index 000000000..626f164df --- /dev/null +++ b/doc/source/api/nova..virt.xenapi.volumeops.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.xenapi.volumeops` Module +============================================================================== +.. automodule:: nova..virt.xenapi.volumeops + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..virt.xenapi_conn.rst b/doc/source/api/nova..virt.xenapi_conn.rst new file mode 100644 index 000000000..14ac5147f --- /dev/null +++ b/doc/source/api/nova..virt.xenapi_conn.rst @@ -0,0 +1,6 @@ +The :mod:`nova..virt.xenapi_conn` Module +============================================================================== +.. automodule:: nova..virt.xenapi_conn + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..volume.api.rst b/doc/source/api/nova..volume.api.rst new file mode 100644 index 000000000..8ad36e049 --- /dev/null +++ b/doc/source/api/nova..volume.api.rst @@ -0,0 +1,6 @@ +The :mod:`nova..volume.api` Module +============================================================================== +.. automodule:: nova..volume.api + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..volume.driver.rst b/doc/source/api/nova..volume.driver.rst new file mode 100644 index 000000000..51f5c0729 --- /dev/null +++ b/doc/source/api/nova..volume.driver.rst @@ -0,0 +1,6 @@ +The :mod:`nova..volume.driver` Module +============================================================================== +.. automodule:: nova..volume.driver + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..volume.manager.rst b/doc/source/api/nova..volume.manager.rst new file mode 100644 index 000000000..91a192a8f --- /dev/null +++ b/doc/source/api/nova..volume.manager.rst @@ -0,0 +1,6 @@ +The :mod:`nova..volume.manager` Module +============================================================================== +.. automodule:: nova..volume.manager + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..volume.san.rst b/doc/source/api/nova..volume.san.rst new file mode 100644 index 000000000..1de068928 --- /dev/null +++ b/doc/source/api/nova..volume.san.rst @@ -0,0 +1,6 @@ +The :mod:`nova..volume.san` Module +============================================================================== +.. automodule:: nova..volume.san + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/api/nova..wsgi.rst b/doc/source/api/nova..wsgi.rst new file mode 100644 index 000000000..0bff1c332 --- /dev/null +++ b/doc/source/api/nova..wsgi.rst @@ -0,0 +1,6 @@ +The :mod:`nova..wsgi` Module +============================================================================== +.. automodule:: nova..wsgi + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/community.rst b/doc/source/community.rst index 4ae32f1eb..e925a47bd 100644 --- a/doc/source/community.rst +++ b/doc/source/community.rst @@ -18,7 +18,7 @@ Getting Involved ================ -The Nova community is a very friendly group and there are places online to join in with the +The OpenStack community for Nova is a very friendly group and there are places online to join in with the community. Feel free to ask questions. 
This document points you to some of the places where you can communicate with people.
@@ -83,3 +83,13 @@ Twitter
 Because all the cool kids do it: `@openstack <http://twitter.com/openstack>`_. Also follow the `#openstack <http://search.twitter.com/search?q=%23openstack>`_ tag for relevant tweets.
+
+OpenStack Docs Site
+-------------------
+
+The `nova.openstack.org <http://nova.openstack.org>`_ site is geared towards developer documentation,
+and the `docs.openstack.org <http://docs.openstack.org>`_ site is intended for cloud administrators
+who are standing up and running OpenStack Compute in production. You can contribute to the Docs Site
+by using bzr and Launchpad and contributing to the openstack-manuals project at http://launchpad.net/openstack-manuals.
+
+
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d337fb69f..846d3cfcd 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -32,11 +32,13 @@ Nova is written with the following design guidelines in mind:
 * **API Compatibility**: Nova strives to be API-compatible with popular systems like Amazon EC2
 
 This documentation is generated by the Sphinx toolkit and lives in the source
-tree. Additional documentation on Nova and other components of OpenStack can
-be found on the `OpenStack wiki`_. Also see the :doc:`community` page for
-other ways to interact with the community.
+tree. Additional draft and project documentation on Nova and other components of OpenStack can
+be found on the `OpenStack wiki`_. Cloud administrators, refer to `docs.openstack.org`_.
+
+Also see the :doc:`community` page for other ways to interact with the community.
 
 .. _`OpenStack wiki`: http://wiki.openstack.org
+.. _`docs.openstack.org`: http://docs.openstack.org
 
 Key Concepts
@@ -50,17 +52,7 @@ Key Concepts
    service.architecture
    nova.object.model
    swift.object.model
-
-Administrator's Documentation
-=============================
-
-.. toctree::
-   :maxdepth: 1
-
-   livecd
-   adminguide/index
-   adminguide/single.node.install
-   adminguide/multi.node.install
+   runnova/index
 
 Developer Docs
 ==============
diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst
index bb9d7a7fe..1d8446f08 100644
--- a/doc/source/man/novamanage.rst
+++ b/doc/source/man/novamanage.rst
@@ -173,12 +173,72 @@ Nova Floating IPs
 ``nova-manage floating create <host> <ip_range>``
 
     Creates floating IP addresses for the named host by the given range.
-    floating delete <ip_range> Deletes floating IP addresses in the range given.
+
+``nova-manage floating delete <ip_range>``
+
+    Deletes floating IP addresses in the range given.
 
 ``nova-manage floating list``
 
     Displays a list of all floating IP addresses.
 
+Nova Flavor
+~~~~~~~~~~~
+
+``nova-manage flavor list``
+
+    Outputs a list of all active flavors to the screen.
+
+``nova-manage flavor list --all``
+
+    Outputs a list of all flavors (active and inactive) to the screen.
+
+``nova-manage flavor create <name> <memory> <vCPU> <local_storage> <flavorID> <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>``
+
+    Creates a flavor with the following positional arguments:
+     * memory (expressed in megabytes)
+     * vcpu(s) (integer)
+     * local storage (expressed in gigabytes)
+     * flavorid (unique integer)
+     * swap space (expressed in megabytes, defaults to zero, optional)
+     * RXTX quota (expressed in gigabytes, defaults to zero, optional)
+     * RXTX cap (expressed in gigabytes, defaults to zero, optional)
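For illustration only (an editorial aside, not part of the patch; the host name, IP range, flavor name, and sizes are all invented), a session using the floating IP and flavor subcommands documented above might look like::

    # hypothetical host and IP range
    nova-manage floating create compute01 10.0.0.0/28
    # hypothetical flavor: 512 MB RAM, 1 vCPU, 10 GB local storage, flavorid 6
    nova-manage flavor create m1.tiny2 512 1 10 6
    # confirm the new flavor shows up among the active flavors
    nova-manage flavor list

The create call passes only the required positional arguments (name, memory in megabytes, vCPUs, local storage in gigabytes, and a unique flavorid); swap and the RXTX quota/cap are omitted and so fall back to their zero defaults.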
+``nova-manage flavor delete <name>``
+
+    Deletes the flavor with the name <name>. This marks the flavor as inactive so it can no longer be launched; however, the record stays in the database for archival and billing purposes.
+
+``nova-manage flavor delete <name> --purge``
+
+    Purges the flavor with the name <name>. This removes the flavor record from the database entirely.
+
+Nova Instance_type
+~~~~~~~~~~~~~~~~~~
+
+The instance_type command is provided as an alias for the flavor command. All the same subcommands and arguments from nova-manage flavor can be used.
+
+Nova Images
+~~~~~~~~~~~
+
+``nova-manage image image_register <path> <owner>``
+
+    Registers an image with the image service.
+
+``nova-manage image kernel_register <path> <owner>``
+
+    Registers a kernel with the image service.
+
+``nova-manage image ramdisk_register <path> <owner>``
+
+    Registers a ramdisk with the image service.
+
+``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>``
+
+    Registers an image, kernel, and ramdisk with the image service.
+
+``nova-manage image convert <directory>``
+
+    Converts all images in <directory> from the old (Bexar) format to the new format.
 
 FILES
 ========
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index e9687dc98..45cc4b879 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -64,6 +64,11 @@ Concept: Instances
 An 'instance' is a word for a virtual machine that runs inside the cloud.
 
+Concept: Instance Type
+----------------------
+
+An 'instance type' describes the compute, memory, and storage capacity of Nova compute instances. In layman's terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching.
+
 Concept: System Architecture
 ----------------------------
diff --git a/doc/source/object.model.rst b/doc/source/object.model.rst
index d02f151fd..419e89b0c 100644
--- a/doc/source/object.model.rst
+++ b/doc/source/object.model.rst
@@ -18,8 +18,6 @@ Object Model
 ============
 
-.. todo:: Add brief description for core models
-
 .. graphviz::
 
   digraph foo {
@@ -42,27 +40,27 @@ Object Model
 Users
 -----
 
-Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/adminguide/managing.users`.
+Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/runnova/managing.users`.
 
 Projects
 --------
 
-For Nova, access to images is based on the project. Read more at :doc:`/adminguide/managing.projects`.
+For Nova, access to images is based on the project. Read more at :doc:`/runnova/managing.projects`.
 
 Images
 ------
 
-Images are binary files that run the operating system. Read more at :doc:`/adminguide/managing.images`.
+Images are binary files that run the operating system. Read more at :doc:`/runnova/managing.images`.
 
 Instances
 ---------
 
-Instances are running virtual servers. Read more at :doc:`/adminguide/managing.instances`.
+Instances are running virtual servers. Read more at :doc:`/runnova/managing.instances`.
 
 Volumes
 -------
 
-.. todo:: Write doc about volumes
+Volumes offer extra block-level storage to instances. Read more at `Managing Volumes <http://docs.openstack.org/openstack-compute/admin/content/ch05s07.html>`_.
 
 Security Groups
 ---------------
@@ -72,7 +70,7 @@ In Nova, a security group is a named collection of network access rules, like fi
 VLANs
 -----
 
-VLAN is the default network mode for Nova. Read more at :doc:`/adminguide/network.vlan`.
+VLAN is the default network mode for Nova. Read more at :doc:`/runnova/network.vlan`.
IP Addresses ------------ diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst index 17c9e10a8..84ed3fe01 100644 --- a/doc/source/quickstart.rst +++ b/doc/source/quickstart.rst @@ -54,7 +54,7 @@ Environment Variables By tweaking the environment that nova.sh runs in, you can build slightly different configurations (though for more complex setups you should see -:doc:`/adminguide/getting.started` and :doc:`/adminguide/multi.node.install`). +`Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_). * HOST_IP * Default: address of first interface from the ifconfig command diff --git a/doc/source/adminguide/binaries.rst b/doc/source/runnova/binaries.rst index 5c50a51f1..023831021 100644 --- a/doc/source/adminguide/binaries.rst +++ b/doc/source/runnova/binaries.rst @@ -35,12 +35,12 @@ Nova api receives xml requests and sends them to the rest of the system. It is nova-objectstore ---------------- -Nova objectstore is an ultra simple file-based storage system for images that replicates most of the S3 Api. It will soon be replaced with glance and a simple image manager. +Nova objectstore is an ultra-simple file-based storage system for images that replicates most of the S3 API. It will soon be replaced with Glance (http://glance.openstack.org) and a simple image manager. nova-compute ------------ -Nova compute is responsible for managing virtual machines. It loads a Service object which exposes the public methods on ComputeManager via rpc. +Nova compute is responsible for managing virtual machines. It loads a Service object which exposes the public methods on ComputeManager via rpc. nova-volume ----------- diff --git a/doc/source/adminguide/euca2ools.rst b/doc/source/runnova/euca2ools.rst index 6f0c57358..6f0c57358 100644 --- a/doc/source/adminguide/euca2ools.rst +++ b/doc/source/runnova/euca2ools.rst diff --git a/doc/source/runnova/flags.rst b/doc/source/runnova/flags.rst new file mode 100644 index 000000000..1bfa022d9 --- /dev/null +++ b/doc/source/runnova/flags.rst @@ -0,0 +1,193 @@ +.. + Copyright 2010-2011 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Flags and Flagfiles +=================== + +Nova uses a configuration file containing flags located in /etc/nova/nova.conf. You can get the most recent listing of available flags by running nova-(servicename) --help, for example, nova-api --help. + +Here's a list of available flags and their default settings.
+ + --ajax_console_proxy_port: port that ajax_console_proxy binds + (default: '8000') + --ajax_console_proxy_topic: the topic ajax proxy nodes listen on + (default: 'ajax_proxy') + --ajax_console_proxy_url: location of ajax console proxy, in the form + "http://127.0.0.1:8000" + (default: 'http://127.0.0.1:8000') + --auth_token_ttl: Seconds for auth tokens to linger + (default: '3600') + (an integer) + --aws_access_key_id: AWS Access ID + (default: 'admin') + --aws_secret_access_key: AWS Access Key + (default: 'admin') + --compute_manager: Manager for compute + (default: 'nova.compute.manager.ComputeManager') + --compute_topic: the topic compute nodes listen on + (default: 'compute') + --connection_type: libvirt, xenapi or fake + (default: 'libvirt') + --console_manager: Manager for console proxy + (default: 'nova.console.manager.ConsoleProxyManager') + --console_topic: the topic console proxy nodes listen on + (default: 'console') + --control_exchange: the main exchange to connect to + (default: 'nova') + --db_backend: The backend to use for db + (default: 'sqlalchemy') + --default_image: default image to use, testing only + (default: 'ami-11111') + --default_instance_type: default instance type to use, testing only + (default: 'm1.small') + --default_log_levels: list of logger=LEVEL pairs + (default: 'amqplib=WARN,sqlalchemy=WARN,eventlet.wsgi.server=WARN') + (a comma separated list) + --default_project: default project for openstack + (default: 'openstack') + --ec2_dmz_host: internal ip of api server + (default: '$my_ip') + --ec2_host: ip of api server + (default: '$my_ip') + --ec2_path: suffix for ec2 + (default: '/services/Cloud') + --ec2_port: cloud controller port + (default: '8773') + (an integer) + --ec2_scheme: prefix for ec2 + (default: 'http') + --[no]enable_new_services: Services to be added to the available pool on + create + (default: 'true') + --[no]fake_network: should we use fake network devices and addresses + (default: 'false') + --[no]fake_rabbit: use a fake rabbit + (default: 'false') + --glance_host: glance host + (default: '$my_ip') + --glance_port: glance port + (default: '9292') + (an integer) + -?,--[no]help: show this help + --[no]helpshort: show usage only for this module + --[no]helpxml: like --help, but generates XML output + --host: name of this node + (default: 'osdemo03') + --image_service: The service to use for retrieving and searching for images. 
+ (default: 'nova.image.s3.S3ImageService') + --instance_name_template: Template string to be used to generate instance + names + (default: 'instance-%08x') + --logfile: output to named file + --logging_context_format_string: format string to use for log messages with + context + (default: '%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s + %(project)s] %(message)s') + --logging_debug_format_suffix: data to append to log format when level is + DEBUG + (default: 'from %(processName)s (pid=%(process)d) %(funcName)s + %(pathname)s:%(lineno)d') + --logging_default_format_string: format string to use for log messages without + context + (default: '%(asctime)s %(levelname)s %(name)s [-] %(message)s') + --logging_exception_prefix: prefix each line of exception output with this + format + (default: '(%(name)s): TRACE: ') + --my_ip: host ip address + (default: '184.106.73.68') + --network_manager: Manager for network + (default: 'nova.network.manager.VlanManager') + --network_topic: the topic network nodes listen on + (default: 'network') + --node_availability_zone: availability zone of this node + (default: 'nova') + --null_kernel: kernel image that indicates not to use a kernel, but to use a + raw disk image instead + (default: 'nokernel') + --osapi_host: ip of api server + (default: '$my_ip') + --osapi_path: suffix for openstack + (default: '/v1.0/') + --osapi_port: OpenStack API port + (default: '8774') + (an integer) + --osapi_scheme: prefix for openstack + (default: 'http') + --periodic_interval: seconds between running periodic tasks + (default: '60') + (a positive integer) + --pidfile: pidfile to use for this service + --rabbit_host: rabbit host + (default: 'localhost') + --rabbit_max_retries: rabbit connection attempts + (default: '12') + (an integer) + --rabbit_password: rabbit password + (default: 'guest') + --rabbit_port: rabbit port + (default: '5672') + (an integer) + --rabbit_retry_interval: rabbit connection retry interval + (default: '10') + (an integer) + --rabbit_userid: rabbit userid + (default: 'guest') + --rabbit_virtual_host: rabbit virtual host + (default: '/') + --region_list: list of region=fqdn pairs separated by commas + (default: '') + (a comma separated list) + --report_interval: seconds between nodes reporting state to datastore + (default: '10') + (a positive integer) + --s3_dmz: s3 dmz ip (for instances) + (default: '$my_ip') + --s3_host: s3 host (for infrastructure) + (default: '$my_ip') + --s3_port: s3 port + (default: '3333') + (an integer) + --scheduler_manager: Manager for scheduler + (default: 'nova.scheduler.manager.SchedulerManager') + --scheduler_topic: the topic scheduler nodes listen on + (default: 'scheduler') + --sql_connection: connection string for sql database + (default: 'sqlite:///$state_path/nova.sqlite') + --sql_idle_timeout: timeout for idle sql database connections + (default: '3600') + --sql_max_retries: sql connection attempts + (default: '12') + (an integer) + --sql_retry_interval: sql connection retry interval + (default: '10') + (an integer) + --state_path: Top-level directory for maintaining nova's state + (default: '/usr/lib/pymodules/python2.6/nova/../') + --[no]use_syslog: output to syslog + (default: 'false') + --[no]verbose: show debug output + (default: 'false') + --volume_manager: Manager for volume + (default: 'nova.volume.manager.VolumeManager') + --volume_name_template: Template string to be used to generate instance names + (default: 'volume-%08x') + --volume_topic: the topic volume nodes listen on + (default: 
'volume') + --vpn_image_id: AMI for cloudpipe vpn server + (default: 'ami-cloudpipe') + --vpn_key_suffix: Suffix to add to project name for vpn key and secgroups + (default: '-vpn')
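To make the list concrete, here is a small illustrative flagfile built only from the flags above (a sketch; the host addresses, credentials, and connection string are placeholders, not recommendations)::

    --verbose
    --my_ip=10.0.0.5
    --s3_host=10.0.0.5
    --rabbit_host=10.0.0.5
    --glance_host=10.0.0.5
    --network_manager=nova.network.manager.VlanManager
    --sql_connection=mysql://nova:password@10.0.0.5/nova

Any flag not listed in such a file simply keeps the default shown above.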
\ No newline at end of file diff --git a/doc/source/adminguide/getting.started.rst b/doc/source/runnova/getting.started.rst index 675d8e664..4cc7307b0 100644 --- a/doc/source/adminguide/getting.started.rst +++ b/doc/source/runnova/getting.started.rst @@ -105,11 +105,10 @@ Configuration Configuring the host system ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -As you read through the Administration Guide you will notice configuration hints -inline with documentation on the subsystem you are configuring. Presented in -this "Getting Started with Nova" document, we only provide what you need to -get started as quickly as possible. For a more detailed description of system -configuration, start reading through :doc:`multi.node.install`. +Nova can be configured in many different ways. In this "Getting Started with Nova" document, we only provide what you need to get started as quickly as possible. For a more detailed description of system +configuration, start reading through `Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_. + +`Detailed instructions for creating a volume group are available <http://docs.openstack.org/openstack-compute/admin/content/ch05s07.html>`_, or use these quick instructions. * Create a volume group (you can use an actual disk for the volume group as well):: @@ -136,6 +135,8 @@ flagfile, so typically a file like ``nova-manage.conf`` would have as its first line ``--flagfile=/etc/nova/nova.conf`` to load the common flags before specifying overrides or additional options. +To get a current, comprehensive list of flagfile options, run bin/nova-<servicename> --help, or refer to the static list at `Reference for Flags in nova.conf <http://docs.openstack.org/openstack-compute/admin/content/ch05s08.html>`_. + A sample configuration to test the system follows:: --verbose @@ -143,13 +144,13 @@ A sample configuration to test the system follows:: --auth_driver=nova.auth.dbdriver.DbDriver Running ---------- +------- There are many parts to the nova system, each with a specific function. They are built to be highly available, so there are many configurations they can be run in (i.e., on many machines, many listeners per machine, etc.). This part of the guide only gets you started quickly; to learn about HA options, see -:doc:`multi.node.install`. +`Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_. Launch supporting services diff --git a/doc/source/adminguide/index.rst b/doc/source/runnova/index.rst index 3bd72cfdc..283d268ce 100644 --- a/doc/source/adminguide/index.rst +++ b/doc/source/runnova/index.rst @@ -15,17 +15,17 @@ License for the specific language governing permissions and limitations under the License. -Administration Guide -==================== +Running Nova +============ -This guide describes the basics of running and managing Nova. +This guide describes the basics of running and managing Nova. For more administrator's documentation, refer to `docs.openstack.org <http://docs.openstack.org>`_. Running the Cloud ----------------- -The fastest way to get a test cloud running is by following the directions in the :doc:`../quickstart`. +The fastest way to get a test cloud running is by following the directions in the :doc:`../quickstart`. It relies on a nova.sh script that runs everything on a single machine. -Nova's cloud works via the interaction of a series of daemon processes that reside persistently on the host machine(s).
Fortunately, the :doc:`../quickstart` process launches sample versions of all these daemons for you. Once you are familiar with basic Nova usage, you can learn more about daemons by reading :doc:`../service.architecture` and :doc:`binaries`. +Nova's cloud works via the interaction of a series of daemon processes that reside persistently on the host machine(s). Fortunately, the :doc:`../quickstart` process launches sample versions of all these daemons for you. Once you are familiar with basic Nova usage, you can learn more about daemons by reading :doc:`../service.architecture` and :doc:`binaries`. Administration Utilities ------------------------ @@ -60,12 +60,12 @@ For background on the core objects referenced in this section, see :doc:`../obje Deployment ---------- -For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq). +For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq). For instructions on multi-server installations, refer to `Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_. + .. toctree:: :maxdepth: 1 - multi.node.install dbsync @@ -75,7 +75,6 @@ Networking .. toctree:: :maxdepth: 1 - multi.node.install network.vlan.rst network.flat.rst diff --git a/doc/source/adminguide/managing.images.rst b/doc/source/runnova/managing.images.rst index c5d93a6e8..c5d93a6e8 100644 --- a/doc/source/adminguide/managing.images.rst +++ b/doc/source/runnova/managing.images.rst diff --git a/doc/source/runnova/managing.instance.types.rst b/doc/source/runnova/managing.instance.types.rst new file mode 100644 index 000000000..746077716 --- /dev/null +++ b/doc/source/runnova/managing.instance.types.rst @@ -0,0 +1,84 @@ +.. + Copyright 2011 Ken Pepple + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Managing Instance Types and Flavors +=================================== + +What are Instance Types or Flavors ? +------------------------------------ + +Instance types describe the compute, memory and storage capacity of nova computing instances. 
In layman's terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching. In the EC2 API, these are called by names such as "m1.large" or "m1.tiny", while the OpenStack API terms these "flavors" with names like "512 MB Server". + +In Nova, "flavor" and "instance type" are equivalent terms. When you create an EC2 instance type, you are also creating an OpenStack API flavor. To reduce repetition, for the rest of this document I will refer to these as instance types. + +Instance types can be in either the active or inactive state: + * Active instance types are available to be used for launching instances + * Inactive instance types are not available for launching instances + +In the current (Cactus) version of nova, instance types can only be created by the nova administrator through the nova-manage command. Future versions of nova (in concert with the OpenStack API or EC2 API) may expose this functionality directly to users. + +Basic Management +---------------- + +Instance types / flavors are managed through the nova-manage binary with +the "instance_type" command and an appropriate subcommand. Note that you can also use +the "flavor" command as a synonym for "instance_type". + +To see all currently active instance types, use the list subcommand:: + + # nova-manage instance_type list + m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + +By default, the list subcommand only shows active instance types.
To see all instance types (inactive and active), use the list subcommand with the "--all" flag:: + + # nova-manage instance_type list --all + m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB + m1.deleted: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB, inactive + +To create an instance type, use the "create" subcommand with the following positional arguments: + * memory (expressed in megabytes) + * vcpu(s) (integer) + * local storage (expressed in gigabytes) + * flavorid (unique integer) + * swap space (expressed in megabytes, defaults to zero, optional) + * RXTX quotas (expressed in gigabytes, defaults to zero, optional) + * RXTX cap (expressed in gigabytes, defaults to zero, optional) + +The following example creates an instance type named "m1.xxlarge":: + + # nova-manage instance_type create m1.xxlarge 32768 16 320 0 0 0 + m1.xxlarge created + +To delete an instance type, use the "delete" subcommand and specify the name:: + + # nova-manage instance_type delete m1.xxlarge + m1.xxlarge deleted + +Please note that the "delete" command only marks the instance type as +inactive in the database; it does not actually remove the instance type. This is done +to preserve the instance type definition for long running instances (which may not +terminate for months or years). 
If you are sure that you want to delete this instance +type from the database, pass the "--purge" flag after the name:: + + # nova-manage instance_type delete m1.xxlarge --purge + m1.xxlarge purged diff --git a/doc/source/adminguide/managing.instances.rst b/doc/source/runnova/managing.instances.rst index e62352017..e62352017 100644 --- a/doc/source/adminguide/managing.instances.rst +++ b/doc/source/runnova/managing.instances.rst diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/runnova/managing.networks.rst index 9eea46d70..9eea46d70 100644 --- a/doc/source/adminguide/managing.networks.rst +++ b/doc/source/runnova/managing.networks.rst diff --git a/doc/source/adminguide/managing.projects.rst b/doc/source/runnova/managing.projects.rst index 5dd7f2de9..5dd7f2de9 100644 --- a/doc/source/adminguide/managing.projects.rst +++ b/doc/source/runnova/managing.projects.rst diff --git a/doc/source/adminguide/managing.users.rst b/doc/source/runnova/managing.users.rst index 392142e86..392142e86 100644 --- a/doc/source/adminguide/managing.users.rst +++ b/doc/source/runnova/managing.users.rst diff --git a/doc/source/adminguide/managingsecurity.rst b/doc/source/runnova/managingsecurity.rst index 7893925e7..7893925e7 100644 --- a/doc/source/adminguide/managingsecurity.rst +++ b/doc/source/runnova/managingsecurity.rst diff --git a/doc/source/adminguide/monitoring.rst b/doc/source/runnova/monitoring.rst index 2c93c71b5..2c93c71b5 100644 --- a/doc/source/adminguide/monitoring.rst +++ b/doc/source/runnova/monitoring.rst diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/runnova/network.flat.rst index 3d8680c6f..3d8680c6f 100644 --- a/doc/source/adminguide/network.flat.rst +++ b/doc/source/runnova/network.flat.rst diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/runnova/network.vlan.rst index c06ce8e8b..c06ce8e8b 100644 --- a/doc/source/adminguide/network.vlan.rst +++ b/doc/source/runnova/network.vlan.rst diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/runnova/nova.manage.rst index 0e9a29b6b..0636e5752 100644 --- a/doc/source/adminguide/nova.manage.rst +++ b/doc/source/runnova/nova.manage.rst @@ -182,6 +182,29 @@ Nova Floating IPs Displays a list of all floating IP addresses. +Nova Images +~~~~~~~~~~~ + +``nova-manage image image_register <path> <owner>`` + + Registers an image with the image service. + +``nova-manage image kernel_register <path> <owner>`` + + Registers a kernel with the image service. + +``nova-manage image ramdisk_register <path> <owner>`` + + Registers a ramdisk with the image service. + +``nova-manage image all_register <image_path> <kernel_path> <ramdisk_path> <owner>`` + + Registers an image kernel and ramdisk with the image service. + +``nova-manage image convert <directory>`` + + Converts all images in directory from the old (Bexar) format to the new format. + Concept: Flags -------------- diff --git a/etc/nova-api.conf b/etc/api-paste.ini index f0e749805..9f7e93d4c 100644 --- a/etc/nova-api.conf +++ b/etc/api-paste.ini @@ -1,6 +1,3 @@ -[DEFAULT] -verbose = 1 - ####### # EC2 # ####### diff --git a/nova/__init__.py b/nova/__init__.py index 8745617bc..256db55a9 100644 --- a/nova/__init__.py +++ b/nova/__init__.py @@ -30,5 +30,3 @@ .. moduleauthor:: Manish Singh <yosh@gimp.org> .. 
moduleauthor:: Andy Smith <andy@anarkystic.com> """ - -from exception import * diff --git a/nova/adminclient.py b/nova/adminclient.py index c614b274c..fc3c5c5fe 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -23,6 +23,8 @@ import base64 import boto import boto.exception import httplib +import re +import string from boto.ec2.regioninfo import RegionInfo @@ -165,19 +167,20 @@ class HostInfo(object): **Fields Include** - * Disk stats - * Running Instances - * Memory stats - * CPU stats - * Network address info - * Firewall info - * Bridge and devices - + * Hostname + * Compute service status + * Volume service status + * Instance count + * Volume count """ def __init__(self, connection=None): self.connection = connection self.hostname = None + self.compute = None + self.volume = None + self.instance_count = 0 + self.volume_count = 0 def __repr__(self): return 'Host:%s' % self.hostname @@ -188,7 +191,39 @@ class HostInfo(object): # this is needed by the sax parser, so ignore the ugly name def endElement(self, name, value, connection): - setattr(self, name, value) + fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name)) + setattr(self, fixed_name, value) + + +class Vpn(object): + """ + Information about a Vpn, as parsed through SAX + + **Fields Include** + + * instance_id + * project_id + * public_ip + * public_port + * created_at + * internal_ip + * state + """ + + def __init__(self, connection=None): + self.connection = connection + self.instance_id = None + self.project_id = None + + def __repr__(self): + return 'Vpn:%s:%s' % (self.project_id, self.instance_id) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name)) + setattr(self, fixed_name, value) class InstanceType(object): @@ -422,6 +457,16 @@ class NovaAdminClient(object): zip = self.apiconn.get_object('GenerateX509ForUser', params, UserInfo) return zip.file + def start_vpn(self, project): + """ + Starts the vpn for a user + """ + return self.apiconn.get_object('StartVpn', {'Project': project}, Vpn) + + def get_vpns(self): + """Return a list of vpn with project name""" + return self.apiconn.get_list('DescribeVpns', {}, [('item', Vpn)]) + def get_hosts(self): return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)]) diff --git a/nova/api/direct.py b/nova/api/direct.py index 208b6d086..dfca250e0 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -187,7 +187,7 @@ class ServiceWrapper(wsgi.Controller): def __init__(self, service_handle): self.service_handle = service_handle - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] @@ -206,7 +206,7 @@ class ServiceWrapper(wsgi.Controller): params = dict([(str(k), v) for (k, v) in params.iteritems()]) result = method(context, **params) if type(result) is dict or type(result) is list: - return self._serialize(result, req) + return self._serialize(result, req.best_match_content_type()) else: return result @@ -218,7 +218,7 @@ class Proxy(object): self.prefix = prefix def __do_request(self, path, context, **kwargs): - req = webob.Request.blank(path) + req = wsgi.Request.blank(path) req.method = 'POST' req.body = urllib.urlencode({'json': utils.dumps(kwargs)}) req.environ['openstack.context'] = context diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index ddcdc673c..fccebca5d 100644 --- 
a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,8 +20,6 @@ Starting point for routing EC2 requests. """ -import datetime -import routes import webob import webob.dec import webob.exc @@ -55,25 +53,22 @@ flags.DEFINE_list('lockout_memcached_servers', None, class RequestLogging(wsgi.Middleware): """Access-Log akin logging for all EC2 API requests.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): + start = utils.utcnow() rv = req.get_response(self.application) - self.log_request_completion(rv, req) + self.log_request_completion(rv, req, start) return rv - def log_request_completion(self, response, request): + def log_request_completion(self, response, request, start): controller = request.environ.get('ec2.controller', None) if controller: controller = controller.__class__.__name__ action = request.environ.get('ec2.action', None) ctxt = request.environ.get('ec2.context', None) - seconds = 'X' - microseconds = 'X' - if ctxt: - delta = datetime.datetime.utcnow() - \ - ctxt.timestamp - seconds = delta.seconds - microseconds = delta.microseconds + delta = utils.utcnow() - start + seconds = delta.seconds + microseconds = delta.microseconds LOG.info( "%s.%ss %s %s %s %s:%s %s [%s] %s %s", seconds, @@ -117,7 +112,7 @@ class Lockout(wsgi.Middleware): debug=0) super(Lockout, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): access_key = str(req.params['AWSAccessKeyId']) failures_key = "authfailures-%s" % access_key @@ -146,7 +141,7 @@ class Authenticate(wsgi.Middleware): """Authenticate an EC2 request and add 'ec2.context' to WSGI environ.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): # Read request signature and access id. try: @@ -195,7 +190,7 @@ class Requestify(wsgi.Middleware): super(Requestify, self).__init__(app) self.controller = utils.import_class(controller)() - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version', 'Timestamp'] @@ -203,6 +198,12 @@ class Requestify(wsgi.Middleware): try: # Raise KeyError if omitted action = req.params['Action'] + # Fix bug lp:720157 for older (version 1) clients + version = req.params['SignatureVersion'] + if int(version) == 1: + non_args.remove('SignatureMethod') + if 'SignatureMethod' in args: + args.pop('SignatureMethod') for non_arg in non_args: # Remove, but raise KeyError if omitted args.pop(non_arg) @@ -233,7 +234,7 @@ class Authorizer(wsgi.Middleware): super(Authorizer, self).__init__(application) self.action_roles = { 'CloudController': { - 'DescribeAvailabilityzones': ['all'], + 'DescribeAvailabilityZones': ['all'], 'DescribeRegions': ['all'], 'DescribeSnapshots': ['all'], 'DescribeKeyPairs': ['all'], @@ -274,7 +275,7 @@ class Authorizer(wsgi.Middleware): }, } - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['ec2.context'] controller = req.environ['ec2.request'].controller.__class__.__name__ @@ -295,7 +296,7 @@ class Authorizer(wsgi.Middleware): return True if 'none' in roles: return False - return any(context.project.has_role(context.user.id, role) + return any(context.project.has_role(context.user_id, role) for role in roles) @@ -308,7 +309,7 @@ class Executor(wsgi.Application): response, or a 400 upon failure. 
""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): context = req.environ['ec2.context'] api_request = req.environ['ec2.request'] @@ -370,7 +371,7 @@ class Executor(wsgi.Application): class Versions(wsgi.Application): - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Respond to a request for all EC2 versions.""" # available api versions diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 735951082..d9a4ef999 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,14 +21,17 @@ Admin API controller, exposed through http via the api worker. """ import base64 +import datetime from nova import db from nova import exception +from nova import flags from nova import log as logging +from nova import utils from nova.auth import manager -from nova.compute import instance_types +FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.ec2.admin') @@ -55,22 +58,54 @@ def project_dict(project): return {} -def host_dict(host): +def host_dict(host, compute_service, instances, volume_service, volumes, now): """Convert a host model object to a result dict""" - if host: - return host.state - else: - return {} + rv = {'hostanme': host, 'instance_count': len(instances), + 'volume_count': len(volumes)} + if compute_service: + latest = compute_service['updated_at'] or compute_service['created_at'] + delta = now - latest + if delta.seconds <= FLAGS.service_down_time: + rv['compute'] = 'up' + else: + rv['compute'] = 'down' + if volume_service: + latest = volume_service['updated_at'] or volume_service['created_at'] + delta = now - latest + if delta.seconds <= FLAGS.service_down_time: + rv['volume'] = 'up' + else: + rv['volume'] = 'down' + return rv -def instance_dict(name, inst): - return {'name': name, +def instance_dict(inst): + return {'name': inst['name'], 'memory_mb': inst['memory_mb'], 'vcpus': inst['vcpus'], 'disk_gb': inst['local_gb'], 'flavor_id': inst['flavorid']} +def vpn_dict(project, vpn_instance): + rv = {'project_id': project.id, + 'public_ip': project.vpn_ip, + 'public_port': project.vpn_port} + if vpn_instance: + rv['instance_id'] = vpn_instance['ec2_id'] + rv['created_at'] = utils.isotime(vpn_instance['created_at']) + address = vpn_instance.get('fixed_ip', None) + if address: + rv['internal_ip'] = address['address'] + if utils.vpn_ping(project.vpn_ip, project.vpn_port): + rv['state'] = 'running' + else: + rv['state'] = 'down' + else: + rv['state'] = 'pending' + return rv + + class AdminController(object): """ API Controller for users, hosts, nodes, and workers. 
@@ -79,9 +114,9 @@ class AdminController(object): def __str__(self): return 'AdminController' - def describe_instance_types(self, _context, **_kwargs): - return {'instanceTypeSet': [instance_dict(n, v) for n, v in - instance_types.INSTANCE_TYPES.iteritems()]} + def describe_instance_types(self, context, **_kwargs): + """Returns all active instance types data (vcpus, memory, etc.)""" + return {'instanceTypeSet': [db.instance_type_get_all(context)]} def describe_user(self, _context, name, **_kwargs): """Returns user data, including access and secret keys.""" @@ -223,19 +258,68 @@ class AdminController(object): raise exception.ApiError(_('operation must be add or remove')) return True + def _vpn_for(self, context, project_id): + """Get the VPN instance for a project ID.""" + for instance in db.instance_get_all_by_project(context, project_id): + if (instance['image_id'] == FLAGS.vpn_image_id + and not instance['state_description'] in + ['shutting_down', 'shutdown']): + return instance + + def start_vpn(self, context, project): + instance = self._vpn_for(context, project) + if not instance: + # NOTE(vish) import delayed because of __init__.py + from nova.cloudpipe import pipelib + pipe = pipelib.CloudPipe() + try: + pipe.launch_vpn_instance(project) + except db.NoMoreNetworks: + raise exception.ApiError("Unable to claim IP for VPN instance" + ", ensure it isn't running, and try " + "again in a few minutes") + instance = self._vpn_for(context, project) + return {'instance_id': instance['ec2_id']} + + def describe_vpns(self, context): + vpns = [] + for project in manager.AuthManager().get_projects(): + instance = self._vpn_for(context, project.id) + vpns.append(vpn_dict(project, instance)) + return {'items': vpns} + # FIXME(vish): these host commands don't work yet, perhaps some of the # required data can be retrieved from service objects? - def describe_hosts(self, _context, **_kwargs): + def describe_hosts(self, context, **_kwargs): """Returns status info for all nodes. 
Includes: - * Disk Space - * Instance List - * RAM used - * CPU used - * DHCP servers running - * Iptables / bridges + * Hostname + * Compute (up, down, None) + * Instance count + * Volume (up, down, None) + * Volume Count """ - return {'hostSet': [host_dict(h) for h in db.host_get_all()]} + services = db.service_get_all(context) + now = datetime.datetime.utcnow() + hosts = [] + rv = [] + for host in [service['host'] for service in services]: + if not host in hosts: + hosts.append(host) + for host in hosts: + compute = [s for s in services if s['host'] == host \ + and s['binary'] == 'nova-compute'] + if compute: + compute = compute[0] + instances = db.instance_get_all_by_host(context, host) + volume = [s for s in services if s['host'] == host \ + and s['binary'] == 'nova-volume'] + if volume: + volume = volume[0] + volumes = db.volume_get_all_by_host(context, host) + rv.append(host_dict(host, compute, instances, volume, volumes, + now)) + return {'hosts': rv} def describe_host(self, _context, name, **_kwargs): """Returns status info for single node.""" diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 7e72d67fb..d7ad08d2f 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -20,6 +20,7 @@ APIRequest class """ +import datetime import re # TODO(termie): replace minidom with etree from xml.dom import minidom @@ -45,8 +46,29 @@ def _underscore_to_xmlcase(str): return res[:1].lower() + res[1:] +def _database_to_isoformat(datetimeobj): + """Return an xs:dateTime parsable string from a datetime""" + return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ") + + def _try_convert(value): - """Return a non-string if possible""" + """Return a non-string from a string or unicode, if possible. + + ============= ===================================================== + When value is returns + ============= ===================================================== + zero-length '' + 'None' None + 'True' True + 'False' False + '0', '-0' 0 + 0xN, -0xN int from hex (positive) (N is any number) + 0bN, -0bN int from binary (positive) (N is any number) + * try conversion to int, float, complex, fallback value + + """ + if len(value) == 0: + return '' if value == 'None': return None if value == 'True': @@ -171,6 +193,9 @@ class APIRequest(object): self._render_dict(xml, data_el, data.__dict__) elif isinstance(data, bool): data_el.appendChild(xml.createTextNode(str(data).lower())) + elif isinstance(data, datetime.datetime): + data_el.appendChild( + xml.createTextNode(_database_to_isoformat(data))) elif data != None: data_el.appendChild(xml.createTextNode(str(data))) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 00d044e95..b1917e9ea 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -39,7 +39,9 @@ from nova import log as logging from nova import network from nova import utils from nova import volume +from nova.api.ec2 import ec2utils from nova.compute import instance_types +from nova.image import s3 FLAGS = flags.FLAGS @@ -73,30 +75,19 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} -def ec2_id_to_id(ec2_id): - """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)""" - return int(ec2_id.split('-')[-1], 16) - - -def id_to_ec2_id(instance_id, template='i-%08x'): - """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])""" - return template % instance_id - - class CloudController(object): """ CloudController provides the critical dispatch between inbound API calls
through the endpoint and messages sent to the other nodes. """ def __init__(self): - self.image_service = utils.import_object(FLAGS.image_service) + self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() self.compute_api = compute.API( network_api=self.network_api, - image_service=self.image_service, volume_api=self.volume_api, - hostname_factory=id_to_ec2_id) + hostname_factory=ec2utils.id_to_ec2_id) self.setup() def __str__(self): @@ -115,7 +106,7 @@ class CloudController(object): start = os.getcwd() os.chdir(FLAGS.ca_path) # TODO(vish): Do this with M2Crypto instead - utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh") + utils.runthis(_("Generating root CA: %s"), "sh", "genrootca.sh") os.chdir(start) def _get_mpi_data(self, context, project_id): @@ -154,11 +145,14 @@ class CloudController(object): availability_zone = self._get_availability_zone_by_host(ctxt, host) floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) - ec2_id = id_to_ec2_id(instance_ref['id']) + ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) + image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine') + k_ec2_id = self._image_ec2_id(instance_ref['kernel_id'], 'kernel') + r_ec2_id = self._image_ec2_id(instance_ref['ramdisk_id'], 'ramdisk') data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { - 'ami-id': instance_ref['image_id'], + 'ami-id': image_ec2_id, 'ami-launch-index': instance_ref['launch_index'], 'ami-manifest-path': 'FIXME', 'block-device-mapping': { @@ -173,12 +167,12 @@ class CloudController(object): 'instance-type': instance_ref['instance_type'], 'local-hostname': hostname, 'local-ipv4': address, - 'kernel-id': instance_ref['kernel_id'], + 'kernel-id': k_ec2_id, + 'ramdisk-id': r_ec2_id, 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', 'public-keys': keys, - 'ramdisk-id': instance_ref['ramdisk_id'], 'reservation-id': instance_ref['reservation_id'], 'security-groups': '', 'mpi': mpi}} @@ -198,8 +192,9 @@ class CloudController(object): return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): - enabled_services = db.service_get_all(context) - disabled_services = db.service_get_all(context, True) + ctxt = context.elevated() + enabled_services = db.service_get_all(ctxt) + disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: @@ -282,7 +277,7 @@ class CloudController(object): 'description': 'fixme'}]} def describe_key_pairs(self, context, key_name=None, **kwargs): - key_pairs = db.key_pair_get_all_by_user(context, context.user.id) + key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] @@ -290,18 +285,18 @@ class CloudController(object): for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix - if context.user.is_admin() or \ + if context.is_admin or \ not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': key_pair['fingerprint'], }) - return {'keypairsSet': result} + return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) - data = _gen_key(context, context.user.id, key_name) + data = _gen_key(context, context.user_id, 
key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} @@ -310,7 +305,7 @@ class CloudController(object): def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: - db.key_pair_destroy(context, context.user.id, key_name) + db.key_pair_destroy(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass @@ -318,16 +313,23 @@ class CloudController(object): def describe_security_groups(self, context, group_name=None, **kwargs): self.compute_api.ensure_default_security_group(context) - if context.user.is_admin(): + if group_name: + groups = [] + for name in group_name: + group = db.security_group_get_by_name(context, + context.project_id, + name) + groups.append(group) + elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] - if not group_name is None: - groups = [g for g in groups if g.name in group_name] - return {'securityGroupInfo': groups} + return {'securityGroupInfo': + list(sorted(groups, + key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} @@ -492,7 +494,7 @@ class CloudController(object): if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError(_('group %s already exists') % group_name) - group = {'user_id': context.user.id, + group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} @@ -512,9 +514,12 @@ class CloudController(object): def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) - # instance_id is passed in as a list of instances - ec2_id = instance_id[0] - instance_id = ec2_id_to_id(ec2_id) + # instance_id may be passed in as a list of instances + if type(instance_id) == list: + ec2_id = instance_id[0] + else: + ec2_id = instance_id + instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) now = datetime.datetime.utcnow() @@ -524,14 +529,15 @@ class CloudController(object): def get_ajax_console(self, context, instance_id, **kwargs): ec2_id = instance_id[0] - internal_id = ec2_id_to_id(ec2_id) - return self.compute_api.get_ajax_console(context, internal_id) + instance_id = ec2utils.ec2_id_to_id(ec2_id) + return self.compute_api.get_ajax_console(context, + instance_id=instance_id) def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: - internal_id = ec2_id_to_id(ec2_id) + internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: @@ -544,11 +550,11 @@ class CloudController(object): instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] - instance_ec2_id = id_to_ec2_id(instance_id) + instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} - v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x') + v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x') v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ -566,8 +572,7 @@ class 
CloudController(object): 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', - 'volumeId': id_to_ec2_id(volume['id'], - 'vol-%08x')}] + 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] @@ -586,12 +591,12 @@ class CloudController(object): return {'volumeSet': [self._format_volume(context, dict(volume))]} def delete_volume(self, context, volume_id, **kwargs): - volume_id = ec2_id_to_id(volume_id) + volume_id = ec2utils.ec2_id_to_id(volume_id) self.volume_api.delete(context, volume_id=volume_id) return True def update_volume(self, context, volume_id, **kwargs): - volume_id = ec2_id_to_id(volume_id) + volume_id = ec2utils.ec2_id_to_id(volume_id) updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: @@ -602,8 +607,8 @@ class CloudController(object): return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume_id = ec2_id_to_id(volume_id) - instance_id = ec2_id_to_id(instance_id) + volume_id = ec2utils.ec2_id_to_id(volume_id) + instance_id = ec2utils.ec2_id_to_id(instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) @@ -614,22 +619,22 @@ class CloudController(object): volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], - 'instanceId': id_to_ec2_id(instance_id), + 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')} + 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} def detach_volume(self, context, volume_id, **kwargs): - volume_id = ec2_id_to_id(volume_id) + volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) instance = self.compute_api.detach_volume(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], - 'instanceId': id_to_ec2_id(instance['id']), + 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')} + 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -663,20 +668,21 @@ class CloudController(object): if instance_id: instances = [] for ec2_id in instance_id: - internal_id = ec2_id_to_id(ec2_id) - instance = self.compute_api.get(context, internal_id) + internal_id = ec2utils.ec2_id_to_id(ec2_id) + instance = self.compute_api.get(context, + instance_id=internal_id) instances.append(instance) else: instances = self.compute_api.get_all(context, **kwargs) for instance in instances: - if not context.user.is_admin(): + if not context.is_admin: if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} instance_id = instance['id'] - ec2_id = id_to_ec2_id(instance_id) + ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id - i['imageId'] = instance['image_id'] + i['imageId'] = self._image_ec2_id(instance['image_id']) i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} @@ -697,7 +703,7 @@ class CloudController(object): i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] - if context.user.is_admin(): + if context.is_admin: i['keyName'] = '%s (%s, %s)' % 
(i['keyName'], instance['project_id'], instance['host']) @@ -731,7 +737,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - if context.user.is_admin(): + if context.is_admin: iterator = db.floating_ip_get_all(context) else: iterator = db.floating_ip_get_all_by_project(context, @@ -742,10 +748,10 @@ class CloudController(object): if (floating_ip_ref['fixed_ip'] and floating_ip_ref['fixed_ip']['instance']): instance_id = floating_ip_ref['fixed_ip']['instance']['id'] - ec2_id = id_to_ec2_id(instance_id) + ec2_id = ec2utils.id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} - if context.user.is_admin(): + if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details @@ -765,7 +771,7 @@ class CloudController(object): def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) - instance_id = ec2_id_to_id(instance_id) + instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.associate_floating_ip(context, instance_id=instance_id, address=public_ip) @@ -778,13 +784,19 @@ class CloudController(object): def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) + if kwargs.get('kernel_id'): + kernel = self._get_image(context, kwargs['kernel_id']) + kwargs['kernel_id'] = kernel['id'] + if kwargs.get('ramdisk_id'): + ramdisk = self._get_image(context, kwargs['ramdisk_id']) + kwargs['ramdisk_id'] = ramdisk['id'] instances = self.compute_api.create(context, instance_type=instance_types.get_by_type( kwargs.get('instance_type', None)), - image_id=kwargs['image_id'], + image_id=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, - kernel_id=kwargs.get('kernel_id', None), + kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), display_name=kwargs.get('display_name'), display_description=kwargs.get('display_description'), @@ -801,7 +813,7 @@ class CloudController(object): instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) for ec2_id in instance_id: - instance_id = ec2_id_to_id(ec2_id) + instance_id = ec2utils.ec2_id_to_id(ec2_id) self.compute_api.delete(context, instance_id=instance_id) return True @@ -809,49 +821,103 @@ class CloudController(object): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: - instance_id = ec2_id_to_id(ec2_id) + instance_id = ec2utils.ec2_id_to_id(ec2_id) self.compute_api.reboot(context, instance_id=instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - instance_id = ec2_id_to_id(instance_id) + instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.rescue(context, instance_id=instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - instance_id = ec2_id_to_id(instance_id) + instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.unrescue(context, instance_id=instance_id) return True - def update_instance(self, context, ec2_id, **kwargs): + def update_instance(self, context, instance_id, **kwargs): updatable_fields = ['display_name', 'display_description'] changes = {} for 
field in updatable_fields: if field in kwargs: changes[field] = kwargs[field] if changes: - instance_id = ec2_id_to_id(ec2_id) + instance_id = ec2utils.ec2_id_to_id(instance_id) self.compute_api.update(context, instance_id=instance_id, **kwargs) return True + _type_prefix_map = {'machine': 'ami', + 'kernel': 'aki', + 'ramdisk': 'ari'} + + def _image_ec2_id(self, image_id, image_type='machine'): + prefix = self._type_prefix_map[image_type] + template = prefix + '-%08x' + return ec2utils.id_to_ec2_id(int(image_id), template=template) + + def _get_image(self, context, ec2_id): + try: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + return self.image_service.show(context, internal_id) + except exception.NotFound: + return self.image_service.show_by_name(context, ec2_id) + + def _format_image(self, image): + """Convert from format defined by BaseImageService to S3 format.""" + i = {} + image_type = image['properties'].get('type') + ec2_id = self._image_ec2_id(image.get('id'), image_type) + name = image.get('name') + if name: + i['imageId'] = "%s (%s)" % (ec2_id, name) + else: + i['imageId'] = ec2_id + kernel_id = image['properties'].get('kernel_id') + if kernel_id: + i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel') + ramdisk_id = image['properties'].get('ramdisk_id') + if ramdisk_id: + i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk') + i['imageOwnerId'] = image['properties'].get('owner_id') + i['imageLocation'] = image['properties'].get('image_location') + i['imageState'] = image['properties'].get('image_state') + i['type'] = image_type + i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True' + i['architecture'] = image['properties'].get('architecture') + return i + def describe_images(self, context, image_id=None, **kwargs): - # Note: image_id is a list! - images = self.image_service.index(context) + # NOTE: image_id is a list! 
if image_id: - images = filter(lambda x: x['imageId'] in image_id, images) + images = [] + for ec2_id in image_id: + try: + image = self._get_image(context, ec2_id) + except exception.NotFound: + raise exception.NotFound(_('Image %s not found') % + ec2_id) + images.append(image) + else: + images = self.image_service.detail(context) + images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) - self.image_service.deregister(context, image_id) + image = self._get_image(context, image_id) + internal_id = image['id'] + self.image_service.delete(context, internal_id) return {'imageId': image_id} def register_image(self, context, image_location=None, **kwargs): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] - image_id = self.image_service.register(context, image_location) + metadata = {'properties': {'image_location': image_location}} + image = self.image_service.create(context, metadata) + image_id = self._image_ec2_id(image['id'], + image['properties']['type']) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) @@ -862,11 +928,11 @@ class CloudController(object): raise exception.ApiError(_('attribute not supported: %s') % attribute) try: - image = self.image_service.show(context, image_id) - except IndexError: - raise exception.ApiError(_('invalid id: %s') % image_id) - result = {'image_id': image_id, 'launchPermission': []} - if image['isPublic']: + image = self._get_image(context, image_id) + except exception.NotFound: + raise exception.NotFound(_('Image %s not found') % image_id) + result = {'imageId': image_id, 'launchPermission': []} + if image['properties']['is_public']: result['launchPermission'].append({'group': 'all'}) return result @@ -883,8 +949,17 @@ class CloudController(object): if not operation_type in ['add', 'remove']: raise exception.ApiError(_('operation_type must be add or remove')) LOG.audit(_("Updating image %s publicity"), image_id, context=context) - return self.image_service.modify(context, image_id, operation_type) + + try: + image = self._get_image(context, image_id) + except exception.NotFound: + raise exception.NotFound(_('Image %s not found') % image_id) + internal_id = image['id'] + del(image['id']) + image['properties']['is_public'] = (operation_type == 'add') + return self.image_service.update(context, internal_id, image) def update_image(self, context, image_id, **kwargs): - result = self.image_service.update(context, image_id, dict(kwargs)) + internal_id = ec2utils.ec2_id_to_id(image_id) + result = self.image_service.update(context, internal_id, dict(kwargs)) + return result diff --git a/contrib/puppet/files/production/genvpn.sh b/nova/api/ec2/ec2utils.py index 538c3cd33..3b34f6ea5 100644 --- a/contrib/puppet/files/production/genvpn.sh +++ b/nova/api/ec2/ec2utils.py @@ -1,4 +1,3 @@ -#!/bin/bash # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the @@ -17,19 +16,17 @@ # License for the specific language governing permissions and limitations # under the License.
-# This gets zipped and run on the cloudpipe-managed OpenVPN server -NAME=$1 -SUBJ=$2 +from nova import exception -mkdir -p projects/$NAME -cd projects/$NAME -# generate a server priv key -openssl genrsa -out server.key 2048 +def ec2_id_to_id(ec2_id): + """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)""" + try: + return int(ec2_id.split('-')[-1], 16) + except ValueError: + raise exception.NotFound(_("Id %s Not Found") % ec2_id) -# generate a server CSR -openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ" -if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then - sudo chown -R nova:nogroup . -fi +def id_to_ec2_id(instance_id, template='i-%08x'): + """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])""" + return template % instance_id diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 6fb441656..28f99b0ef 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -65,7 +65,7 @@ class MetadataRequestHandler(wsgi.Application): data = data[item] return data - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): cc = cloud.CloudController() remote_address = req.remote_addr diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 056c7dd27..ab9dbb780 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -34,6 +34,7 @@ from nova.api.openstack import flavors from nova.api.openstack import images from nova.api.openstack import servers from nova.api.openstack import shared_ip_groups +from nova.api.openstack import zones LOG = logging.getLogger('nova.api.openstack') @@ -46,7 +47,7 @@ flags.DEFINE_bool('allow_admin_api', class FaultWrapper(wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) @@ -73,12 +74,20 @@ class APIRouter(wsgi.Router): server_members = {'action': 'POST'} if FLAGS.allow_admin_api: LOG.debug(_("Including admin operations in API.")) + server_members['pause'] = 'POST' server_members['unpause'] = 'POST' - server_members["diagnostics"] = "GET" - server_members["actions"] = "GET" + server_members['diagnostics'] = 'GET' + server_members['actions'] = 'GET' server_members['suspend'] = 'POST' server_members['resume'] = 'POST' + server_members['rescue'] = 'POST' + server_members['unrescue'] = 'POST' + server_members['reset_network'] = 'POST' + server_members['inject_network_info'] = 'POST' + + mapper.resource("zone", "zones", controller=zones.Controller(), + collection={'detail': 'GET', 'info': 'GET'}), mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, @@ -106,7 +115,7 @@ class APIRouter(wsgi.Router): class Versions(wsgi.Application): - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Respond to a request for all OpenStack API versions.""" response = { @@ -115,4 +124,6 @@ class Versions(wsgi.Application): metadata = { "application/xml": { "attributes": dict(version=["status", "id"])}} - return wsgi.Serializer(req.environ, metadata).to_content_type(response) + + content_type = req.best_match_content_type() + return wsgi.Serializer(metadata).serialize(response, content_type) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 1dfdd5318..de8905f46 100644 --- 
a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -26,6 +26,7 @@ import webob.dec from nova import auth from nova import context from nova import db +from nova import exception from nova import flags from nova import manager from nova import utils @@ -45,7 +46,7 @@ class AuthMiddleware(wsgi.Middleware): self.auth = auth.manager.AuthManager() super(AuthMiddleware, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if not self.has_authentication(req): return self.authenticate(req) @@ -103,11 +104,14 @@ class AuthMiddleware(wsgi.Middleware): 2 days ago. """ ctxt = context.get_admin_context() - token = self.db.auth_get_token(ctxt, token_hash) + try: + token = self.db.auth_token_get(ctxt, token_hash) + except exception.NotFound: + return None if token: delta = datetime.datetime.now() - token.created_at if delta.days >= 2: - self.db.auth_destroy_token(ctxt, token) + self.db.auth_token_destroy(ctxt, token.token_hash) else: return self.auth.get_user(token.user_id) return None @@ -117,7 +121,7 @@ class AuthMiddleware(wsgi.Middleware): username - string key - string API key - req - webob.Request object + req - wsgi.Request object """ ctxt = context.get_admin_context() user = self.auth.get_user_from_access_key(key) @@ -131,6 +135,6 @@ class AuthMiddleware(wsgi.Middleware): token_dict['server_management_url'] = req.url token_dict['storage_url'] = '' token_dict['user_id'] = user.id - token = self.db.auth_create_token(ctxt, token_dict) + token = self.db.auth_token_create(ctxt, token_dict) return token, user return None, None diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 197125d86..7abb5f884 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -import logging import time from webob import exc diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 6d2fa16e8..74ac21024 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -15,25 +15,41 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import exception +import webob.exc +from nova import exception -def limited(items, req): - """Return a slice of items according to requested offset and limit. - items - a sliceable - req - wobob.Request possibly containing offset and limit GET variables. - offset is where to start in the list, and limit is the maximum number - of items to return. +def limited(items, request, max_limit=1000): + """ + Return a slice of items according to requested offset and limit. - If limit is not specified, 0, or > 1000, defaults to 1000. + @param items: A sliceable entity + @param request: `wsgi.Request` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. Negative values for either offset or limit + will cause exc.HTTPBadRequest() exceptions to be raised. 
+ @kwarg max_limit: The maximum number of items to return from 'items' """ + try: + offset = int(request.GET.get('offset', 0)) + except ValueError: + raise webob.exc.HTTPBadRequest(_('offset param must be an integer')) + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) + + if limit < 0: + raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + + if offset < 0: + raise webob.exc.HTTPBadRequest(_('offset param must be positive')) - offset = int(req.GET.get('offset', 0)) - limit = int(req.GET.get('limit', 0)) - if not limit: - limit = 1000 - limit = min(1000, limit) + limit = min(max_limit, limit or max_limit) range_end = offset + limit return items[offset:range_end] diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 9ebdbe710..8c291c2eb 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -65,7 +65,7 @@ class Controller(wsgi.Controller): def create(self, req, server_id): """Creates a new console""" - #info = self._deserialize(req.body, req) + #info = self._deserialize(req.body, req.get_content_type()) self.console_api.create_console( req.environ['nova.context'], int(server_id)) diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 224a7ef0b..2fd733299 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -42,7 +42,7 @@ class Fault(webob.exc.HTTPException): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. 
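# NOTE(editor): a quick sketch of how the reworked limited() helper above
# behaves; FakeRequest is a stand-in for wsgi.Request, since limited()
# only touches the GET dict:

class FakeRequest(object):
    def __init__(self, **params):
        self.GET = params

items = range(3000)
assert limited(items, FakeRequest()) == range(1000)
assert limited(items, FakeRequest(offset='100'))[0] == 100
assert limited(items, FakeRequest(limit='25')) == range(25)
assert len(limited(items, FakeRequest(limit='9999'))) == 1000
# offset='-1' or limit='-1' raises webob.exc.HTTPBadRequest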
@@ -57,6 +57,7 @@ class Fault(webob.exc.HTTPException): fault_data[fault_name]['retryAfter'] = retry # 'code' is an attribute on the fault tag itself metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} - serializer = wsgi.Serializer(req.environ, metadata) - self.wrapped_exc.body = serializer.to_content_type(fault_data) + serializer = wsgi.Serializer(metadata) + content_type = req.best_match_content_type() + self.wrapped_exc.body = serializer.serialize(fault_data, content_type) return self.wrapped_exc diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index f620d4107..f3d040ba3 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -17,6 +17,8 @@ from webob import exc +from nova import db +from nova import context from nova.api.openstack import faults from nova.api.openstack import common from nova.compute import instance_types @@ -39,19 +41,20 @@ class Controller(wsgi.Controller): def detail(self, req): """Return all flavors in detail.""" - items = [self.show(req, id)['flavor'] for id in self._all_ids()] - items = common.limited(items, req) + items = [self.show(req, id)['flavor'] for id in self._all_ids(req)] return dict(flavors=items) def show(self, req, id): """Return data about the given flavor id.""" - for name, val in instance_types.INSTANCE_TYPES.iteritems(): - if val['flavorid'] == int(id): - item = dict(ram=val['memory_mb'], disk=val['local_gb'], - id=val['flavorid'], name=name) - return dict(flavor=item) + ctxt = req.environ['nova.context'] + values = db.instance_type_get_by_flavor_id(ctxt, id) + if values: + return dict(flavor=values) raise faults.Fault(exc.HTTPNotFound()) - def _all_ids(self): + def _all_ids(self, req): """Return the list of all flavorids.""" - return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()] + ctxt = req.environ['nova.context'] + inst_types = db.instance_type_get_all(ctxt) + flavor_ids = [inst_types[i]['flavorid'] for i in inst_types.keys()] + return sorted(flavor_ids) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 9d56bc508..98f0dd96b 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -import logging - from webob import exc from nova import compute @@ -153,7 +151,7 @@ class Controller(wsgi.Controller): def create(self, req): context = req.environ['nova.context'] - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) instance_id = env["image"]["serverId"] name = env["image"]["name"] diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index cbb4b897e..88ffc3246 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -57,7 +57,7 @@ class RateLimitingMiddleware(wsgi.Middleware): self.limiter = WSGIAppProxy(service_host) super(RateLimitingMiddleware, self).__init__(application) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Rate limit the request.
@@ -183,7 +183,7 @@ class WSGIApp(object): """Create the WSGI application using the given Limiter instance.""" self.limiter = limiter - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): parts = req.path_info.split('/') # format: /limiter/<username>/<urlencoded action> diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 17c5519a1..dc28a0782 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 OpenStack LLC. # All Rights Reserved. # @@ -15,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import hashlib import json import traceback @@ -35,7 +34,6 @@ import nova.api.openstack LOG = logging.getLogger('server') -LOG.setLevel(logging.DEBUG) FLAGS = flags.FLAGS @@ -53,7 +51,8 @@ def _translate_detail_keys(inst): power_state.PAUSED: 'paused', power_state.SHUTDOWN: 'active', power_state.SHUTOFF: 'active', - power_state.CRASHED: 'error'} + power_state.CRASHED: 'error', + power_state.FAILED: 'error'} inst_dict = {} mapped_keys = dict(status='state', imageId='image_id', @@ -64,8 +63,24 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], private=[]) - inst_dict['metadata'] = {} + + # grab single private fixed ip + private_ips = utils.get_from_path(inst, 'fixed_ip/address') + inst_dict['addresses']['private'] = private_ips + + # grab all public floating ips + public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address') + inst_dict['addresses']['public'] = public_ips + + # Return the metadata as a dictionary + metadata = {} + for item in inst['metadata']: + metadata[item['key']] = item['value'] + inst_dict['metadata'] = metadata + inst_dict['hostId'] = '' + if inst['host']: + inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest() return dict(server=inst_dict) @@ -83,7 +98,7 @@ class Controller(wsgi.Controller): 'application/xml': { "attributes": { "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress"]}}} + "status", "progress", "adminPass"]}}} def __init__(self): self.compute_api = compute.API() @@ -124,38 +139,35 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def _get_kernel_ramdisk_from_image(self, req, image_id): - """ - Machine images are associated with Kernels and Ramdisk images via - metadata stored in Glance as 'image_properties' - """ - def lookup(param): - _image_id = image_id - try: - return image['properties'][param] - except KeyError: - raise exception.NotFound( - _("%(param)s property not found for image %(_image_id)s") % - locals()) - - image_id = str(image_id) - image = self._image_service.show(req.environ['nova.context'], image_id) - return lookup('kernel_id'), lookup('ramdisk_id') - def create(self, req): """ Creates a new server for a given user """ - env = self._deserialize(req.body, req) + env = self._deserialize(req.body, req.get_content_type()) if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) - key_pair = auth_manager.AuthManager.get_key_pairs( - req.environ['nova.context'])[0] + context = req.environ['nova.context'] + key_pairs = auth_manager.AuthManager.get_key_pairs(context) + if not key_pairs: + raise exception.NotFound(_("No keypairs defined")) + key_pair = key_pairs[0] + image_id = common.get_image_id_from_image_hash(self._image_service, - 
req.environ['nova.context'], env['server']['imageId']) + context, env['server']['imageId']) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) + + # Metadata is a list, not a Dictionary, because we allow duplicate keys + # (even though JSON can't encode this) + # In future, we may not allow duplicate keys. + # However, the CloudServers API is not definitive on this front, + # and we want to be compatible. + metadata = [] + if env['server'].get('metadata'): + for k, v in env['server']['metadata'].items(): + metadata.append({'key': k, 'value': v}) + instances = self.compute_api.create( - req.environ['nova.context'], + context, instance_types.get_by_flavor_id(env['server']['flavorId']), image_id, kernel_id=kernel_id, @@ -163,12 +175,24 @@ class Controller(wsgi.Controller): display_name=env['server']['name'], display_description=env['server']['name'], key_name=key_pair['name'], - key_data=key_pair['public_key']) - return _translate_keys(instances[0]) + key_data=key_pair['public_key'], + metadata=metadata, + onset_files=env.get('onset_files', [])) + + server = _translate_keys(instances[0]) + password = "%s%s" % (server['server']['name'][:4], + utils.generate_password(12)) + server['server']['adminPass'] = password + self.compute_api.set_admin_password(context, server['server']['id'], + password) + return server def update(self, req, id): """ Updates the server name or password """ - inst_dict = self._deserialize(req.body, req) + if len(req.body) == 0: + raise exc.HTTPUnprocessableEntity() + + inst_dict = self._deserialize(req.body, req.get_content_type()) if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) @@ -189,10 +213,58 @@ class Controller(wsgi.Controller): return exc.HTTPNoContent() def action(self, req, id): - """ Multi-purpose method used to reboot, rebuild, and - resize a server """ - input_dict = self._deserialize(req.body, req) - #TODO(sandy): rebuild/resize not supported. 
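# NOTE(editor): the create() method above converts the API's metadata
# dictionary into the list-of-dicts shape the compute layer stores; a
# small sketch with made-up keys:

env = {'server': {'metadata': {'role': 'webhead', 'tier': 'frontend'}}}
metadata = []
for k, v in env['server']['metadata'].items():
    metadata.append({'key': k, 'value': v})
# metadata now holds (in some order):
#   [{'key': 'role', 'value': 'webhead'},
#    {'key': 'tier', 'value': 'frontend'}]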
+ """Multi-purpose method used to reboot, rebuild, or + resize a server""" + + actions = { + 'reboot': self._action_reboot, + 'resize': self._action_resize, + 'confirmResize': self._action_confirm_resize, + 'revertResize': self._action_revert_resize, + 'rebuild': self._action_rebuild, + } + + input_dict = self._deserialize(req.body, req.get_content_type()) + for key in actions.keys(): + if key in input_dict: + return actions[key](input_dict, req, id) + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_confirm_resize(self, input_dict, req, id): + try: + self.compute_api.confirm_resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in confirm-resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPNoContent() + + def _action_revert_resize(self, input_dict, req, id): + try: + self.compute_api.revert_resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in revert-resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + + def _action_rebuild(self, input_dict, req, id): + return faults.Fault(exc.HTTPNotImplemented()) + + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing arguments for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return faults.Fault(exc.HTTPAccepted()) + + def _action_reboot(self, input_dict, req, id): try: reboot_type = input_dict['reboot']['type'] except Exception: @@ -249,6 +321,34 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def reset_network(self, req, id): + """ + Reset networking on an instance (admin only). + + """ + context = req.environ['nova.context'] + try: + self.compute_api.reset_network(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::reset_network %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def inject_network_info(self, req, id): + """ + Inject network info for an instance (admin only). + + """ + context = req.environ['nova.context'] + try: + self.compute_api.inject_network_info(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::inject_network_info %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def pause(self, req, id): """ Permit Admins to Pause the server. 
""" ctxt = req.environ['nova.context'] @@ -293,6 +393,28 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def rescue(self, req, id): + """Permit users to rescue the server.""" + context = req.environ["nova.context"] + try: + self.compute_api.rescue(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("compute.api::rescue %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unrescue(self, req, id): + """Permit users to unrescue the server.""" + context = req.environ["nova.context"] + try: + self.compute_api.unrescue(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("compute.api::unrescue %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def get_ajax_console(self, req, id): """ Returns a url to an instance's ajaxterm console. """ try: @@ -320,3 +442,37 @@ class Controller(wsgi.Controller): action=item.action, error=item.error)) return dict(actions=actions) + + def _get_kernel_ramdisk_from_image(self, req, image_id): + """Retrevies kernel and ramdisk IDs from Glance + + Only 'machine' (ami) type use kernel and ramdisk outside of the + image. + """ + # FIXME(sirp): Since we're retrieving the kernel_id from an + # image_property, this means only Glance is supported. + # The BaseImageService needs to expose a consistent way of accessing + # kernel_id and ramdisk_id + image = self._image_service.show(req.environ['nova.context'], image_id) + + if image['status'] != 'active': + raise exception.Invalid( + _("Cannot build from image %(image_id)s, status not active") % + locals()) + + if image['disk_format'] != 'ami': + return None, None + + try: + kernel_id = image['properties']['kernel_id'] + except KeyError: + raise exception.NotFound( + _("Kernel not found for image %(image_id)s") % locals()) + + try: + ramdisk_id = image['properties']['ramdisk_id'] + except KeyError: + raise exception.NotFound( + _("Ramdisk not found for image %(image_id)s") % locals()) + + return kernel_id, ramdisk_id diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index bd3cc23a8..5d78f9377 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -import logging - from webob import exc from nova import wsgi diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py new file mode 100644 index 000000000..8fe84275a --- /dev/null +++ b/nova/api/openstack/zones.py @@ -0,0 +1,95 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import common + +from nova import flags +from nova import wsgi +from nova import db +from nova.scheduler import api + + +FLAGS = flags.FLAGS + + +def _filter_keys(item, keys): + """ + Filters all model attributes except for keys + item is a dict + + """ + return dict((k, v) for k, v in item.iteritems() if k in keys) + + +def _exclude_keys(item, keys): + return dict((k, v) for k, v in item.iteritems() if k not in keys) + + +def _scrub_zone(zone): + return _filter_keys(zone, ('id', 'api_url')) + + +class Controller(wsgi.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "zone": ["id", "api_url", "name", "capabilities"]}}} + + def index(self, req): + """Return all zones in brief""" + # Ask the ZoneManager in the Scheduler for most recent data, + # or fall-back to the database ... + items = api.API().get_zone_list(req.environ['nova.context']) + if not items: + items = db.zone_get_all(req.environ['nova.context']) + + items = common.limited(items, req) + items = [_exclude_keys(item, ['username', 'password']) + for item in items] + return dict(zones=items) + + def detail(self, req): + """Return all zones in detail""" + return self.index(req) + + def info(self, req): + """Return name and capabilities for this zone.""" + return dict(zone=dict(name=FLAGS.zone_name, + capabilities=FLAGS.zone_capabilities)) + + def show(self, req, id): + """Return data about the given zone id""" + zone_id = int(id) + zone = db.zone_get(req.environ['nova.context'], zone_id) + return dict(zone=_scrub_zone(zone)) + + def delete(self, req, id): + zone_id = int(id) + db.zone_delete(req.environ['nova.context'], zone_id) + return {} + + def create(self, req): + context = req.environ['nova.context'] + env = self._deserialize(req.body, req.get_content_type()) + zone = db.zone_create(context, env["zone"]) + return dict(zone=_scrub_zone(zone)) + + def update(self, req, id): + context = req.environ['nova.context'] + env = self._deserialize(req.body, req.get_content_type()) + zone_id = int(id) + zone = db.zone_update(context, zone_id, env["zone"]) + return dict(zone=_scrub_zone(zone)) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e652f1caa..5da7751a0 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -74,6 +74,25 @@ LOG = logging.getLogger("nova.ldapdriver") # in which we may want to change the interface a bit more. 
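# NOTE(editor): a tiny demonstration of the _filter_keys()/_exclude_keys()
# helpers from zones.py above; the zone values are made up:

zone = {'id': 1, 'api_url': 'http://zone1:8774/', 'name': 'zone1',
        'username': 'bob', 'password': 'xxx'}
assert _scrub_zone(zone) == {'id': 1, 'api_url': 'http://zone1:8774/'}
assert 'password' not in _exclude_keys(zone, ['username', 'password'])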
+def _clean(attr): + """Clean attr for insertion into ldap""" + if attr is None: + return None + if type(attr) is unicode: + return str(attr) + return attr + + +def sanitize(fn): + """Decorator to sanitize all args""" + def _wrapped(self, *args, **kwargs): + args = [_clean(x) for x in args] + kwargs = dict((k, _clean(v)) for (k, v) in kwargs.iteritems()) + return fn(self, *args, **kwargs) + _wrapped.func_name = fn.func_name + return _wrapped + + class LdapDriver(object): """Ldap Auth driver @@ -106,23 +125,27 @@ class LdapDriver(object): self.conn.unbind_s() return False + @sanitize def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) return self.__to_user(attr) + @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree return self.__to_user(self.__find_object(dn, query)) + @sanitize def get_project(self, pid): """Retrieve project by id""" dn = self.__project_to_dn(pid) attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) + @sanitize def get_users(self): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, @@ -134,6 +157,7 @@ class LdapDriver(object): users.append(user) return users + @sanitize def get_projects(self, uid=None): """Retrieve list of projects""" pattern = LdapDriver.project_pattern @@ -143,6 +167,7 @@ class LdapDriver(object): pattern) return [self.__to_project(attr) for attr in attrs] + @sanitize def create_user(self, name, access_key, secret_key, is_admin): """Create a user""" if self.__user_exists(name): @@ -196,6 +221,7 @@ class LdapDriver(object): self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) + @sanitize def create_project(self, name, manager_uid, description=None, member_uids=None): """Create a project""" @@ -231,6 +257,7 @@ class LdapDriver(object): self.conn.add_s(dn, attr) return self.__to_project(dict(attr)) + @sanitize def modify_project(self, project_id, manager_uid=None, description=None): """Modify an existing project""" if not manager_uid and not description: @@ -249,21 +276,25 @@ class LdapDriver(object): dn = self.__project_to_dn(project_id) self.conn.modify_s(dn, attr) + @sanitize def add_to_project(self, uid, project_id): """Add user to project""" dn = self.__project_to_dn(project_id) return self.__add_to_group(uid, dn) + @sanitize def remove_from_project(self, uid, project_id): """Remove user from project""" dn = self.__project_to_dn(project_id) return self.__remove_from_group(uid, dn) + @sanitize def is_in_project(self, uid, project_id): """Check if user is in project""" dn = self.__project_to_dn(project_id) return self.__is_in_group(uid, dn) + @sanitize def has_role(self, uid, role, project_id=None): """Check if user has role @@ -273,6 +304,7 @@ class LdapDriver(object): role_dn = self.__role_to_dn(role, project_id) return self.__is_in_group(uid, role_dn) + @sanitize def add_role(self, uid, role, project_id=None): """Add role for user (or user and project)""" role_dn = self.__role_to_dn(role, project_id) @@ -283,11 +315,13 @@ class LdapDriver(object): else: return self.__add_to_group(uid, role_dn) + @sanitize def remove_role(self, uid, role, project_id=None): """Remove role for user (or user and project)""" role_dn = self.__role_to_dn(role, project_id) return self.__remove_from_group(uid, role_dn) + @sanitize def get_user_roles(self, uid, project_id=None): """Retrieve list of roles for user (or user and project)""" if project_id is None: @@
-307,6 +341,7 @@ class LdapDriver(object): roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] + @sanitize def delete_user(self, uid): """Delete a user""" if not self.__user_exists(uid): @@ -332,12 +367,14 @@ class LdapDriver(object): # Delete entry self.conn.delete_s(self.__uid_to_dn(uid)) + @sanitize def delete_project(self, project_id): """Delete a project""" project_dn = self.__project_to_dn(project_id) self.__delete_roles(project_dn) self.__delete_group(project_dn) + @sanitize def modify_user(self, uid, access_key=None, secret_key=None, admin=None): """Modify an existing user""" if not access_key and not secret_key and admin is None: diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index c53a4acdc..cda2ecc28 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -10,7 +10,6 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" -export CLOUD_SERVERS_API_KEY="%(access)s" -export CLOUD_SERVERS_USERNAME="%(user)s" -export CLOUD_SERVERS_URL="%(os)s" - +export NOVA_API_KEY="%(access)s" +export NOVA_USERNAME="%(user)s" +export NOVA_URL="%(os)s" diff --git a/nova/compute/api.py b/nova/compute/api.py index ac02dbcfa..f5638ba0b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -67,10 +67,10 @@ class API(base.Base): """Get the network topic for an instance.""" try: instance = self.get(context, instance_id) - except exception.NotFound as e: + except exception.NotFound: LOG.warning(_("Instance %d was not found in get_network_topic"), instance_id) - raise e + raise host = instance['host'] if not host: @@ -85,11 +85,12 @@ class API(base.Base): min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None): + availability_zone=None, user_data=None, metadata=[], + onset_files=None): """Create the number of instances requested if quota and other arguments check out ok.""" - type_data = instance_types.INSTANCE_TYPES[instance_type] + type_data = instance_types.get_instance_type(instance_type) num_instances = quota.allowed_instances(context, max_count, type_data) if num_instances < min_count: pid = context.project_id @@ -99,25 +100,48 @@ class API(base.Base): "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) - # No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - LOG.debug(_("Creating a raw instance")) - # Make sure we have access to kernel and ramdisk (if not raw) - logging.debug("Using Kernel=%s, Ramdisk=%s" % - (kernel_id, ramdisk_id)) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) + num_metadata = len(metadata) + quota_metadata = quota.allowed_metadata_items(context, num_metadata) + if quota_metadata < num_metadata: + pid = context.project_id + msg = (_("Quota exceeded for %(pid)s,"
+ " tried to set %(num_metadata)s metadata properties") + % locals()) + LOG.warn(msg) + raise quota.QuotaError(msg, "MetadataLimitExceeded") + + # Because metadata is stored in the DB, we hard-code the size limits + # In future, we may support more variable length strings, so we act + # as if this is quota-controlled for forwards compatibility + for metadata_item in metadata: + k = metadata_item['key'] + v = metadata_item['value'] + if len(k) > 255 or len(v) > 255: + pid = context.project_id + msg = (_("Quota exceeeded for %(pid)s," + " metadata property key or value too long") + % locals()) + LOG.warn(msg) + raise quota.QuotaError(msg, "MetadataLimitExceeded") + + image = self.image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image['properties'].get('kernel_id', None) + if ramdisk_id is None: + ramdisk_id = image['properties'].get('ramdisk_id', None) + # FIXME(sirp): is there a way we can remove null_kernel? + # No kernel and ramdisk for raw images + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + LOG.debug(_("Creating a raw instance")) + # Make sure we have access to kernel and ramdisk (if not raw) + logging.debug("Using Kernel=%s, Ramdisk=%s" % + (kernel_id, ramdisk_id)) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] @@ -141,6 +165,7 @@ class API(base.Base): 'image_id': image_id, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', + 'state': 0, 'state_description': 'scheduling', 'user_id': context.user_id, 'project_id': context.project_id, @@ -155,8 +180,8 @@ class API(base.Base): 'key_name': key_name, 'key_data': key_data, 'locked': False, + 'metadata': metadata, 'availability_zone': availability_zone} - elevated = context.elevated() instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) @@ -193,7 +218,8 @@ class API(base.Base): {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id, - "availability_zone": availability_zone}}) + "availability_zone": availability_zone, + "onset_files": onset_files}}) for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -293,13 +319,13 @@ class API(base.Base): LOG.debug(_("Going to try to terminate %s"), instance_id) try: instance = self.get(context, instance_id) - except exception.NotFound as e: - LOG.warning(_("Instance %d was not found during terminate"), + except exception.NotFound: + LOG.warning(_("Instance %s was not found during terminate"), instance_id) - raise e + raise if (instance['state_description'] == 'terminating'): - LOG.warning(_("Instance %d is already being terminated"), + LOG.warning(_("Instance %s is already being terminated"), instance_id) return @@ -379,6 +405,10 @@ class API(base.Base): kwargs = {'method': method, 'args': params} return rpc.call(context, queue, kwargs) + def _cast_scheduler_message(self, context, args): + """Generic handler for RPC calls to the scheduler""" + rpc.cast(context, FLAGS.scheduler_topic, args) + def snapshot(self, context, instance_id, name): """Snapshot the given instance. 
@@ -395,6 +425,45 @@ """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) + def revert_resize(self, context, instance_id): + """Reverts a resize, deleting the 'new' instance in the process""" + context = context.elevated() + migration_ref = self.db.migration_get_by_instance_and_status(context, + instance_id, 'finished') + if not migration_ref: + raise exception.NotFound(_("No finished migrations found for " + "instance")) + + params = {'migration_id': migration_ref['id']} + self._cast_compute_message('revert_resize', context, instance_id, + migration_ref['dest_compute'], params=params) + + def confirm_resize(self, context, instance_id): + """Confirms a migration/resize, deleting the 'old' instance in the + process.""" + context = context.elevated() + migration_ref = self.db.migration_get_by_instance_and_status(context, + instance_id, 'finished') + if not migration_ref: + raise exception.NotFound(_("No finished migrations found for " + "instance")) + instance_ref = self.db.instance_get(context, instance_id) + params = {'migration_id': migration_ref['id']} + self._cast_compute_message('confirm_resize', context, instance_id, + migration_ref['source_compute'], params=params) + + self.db.migration_update(context, migration_ref['id'], + {'status': 'confirmed'}) + self.db.instance_update(context, instance_id, + {'host': migration_ref['dest_compute'], }) + + def resize(self, context, instance_id, flavor): + """Resize a running instance.""" + self._cast_scheduler_message(context, + {"method": "prep_resize", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id, }},) + def pause(self, context, instance_id): """Pause the given instance.""" self._cast_compute_message('pause_instance', context, instance_id) @@ -430,9 +499,14 @@ class API(base.Base): """Unrescue the given instance.""" self._cast_compute_message('unrescue_instance', context, instance_id) - def set_admin_password(self, context, instance_id): + def set_admin_password(self, context, instance_id, password=None): """Set the root/admin password for the given instance.""" - self._cast_compute_message('set_admin_password', context, instance_id) + self._cast_compute_message('set_admin_password', context, instance_id, + password) + + def inject_file(self, context, instance_id): + """Write a file to the given instance.""" + self._cast_compute_message('inject_file', context, instance_id) def get_ajax_console(self, context, instance_id): """Get a url to an AJAX Console""" @@ -444,7 +518,7 @@ class API(base.Base): {'method': 'authorize_ajax_console', 'args': {'token': output['token'], 'host': output['host'], 'port': output['port']}}) - return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url, + return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url, output['token'])} def get_console_output(self, context, instance_id): @@ -466,6 +540,20 @@ class API(base.Base): instance = self.get(context, instance_id) return instance['locked'] + def reset_network(self, context, instance_id): + """ + Reset networking on the instance. + + """ + self._cast_compute_message('reset_network', context, instance_id) + + def inject_network_info(self, context, instance_id): + """ + Inject network info for the instance. + + """ + self._cast_compute_message('inject_network_info', context, instance_id) + def attach_volume(self, context, instance_id, volume_id, device): if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. 
" diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index 196d6a8df..fa02a5dfa 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -4,6 +4,7 @@ # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. +# Copyright 2011 Ken Pepple # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -21,30 +22,120 @@ The built-in instance properties. """ -from nova import flags +from nova import context +from nova import db from nova import exception +from nova import flags +from nova import log as logging FLAGS = flags.FLAGS -INSTANCE_TYPES = { - 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), - 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), - 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), - 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), - 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} +LOG = logging.getLogger('nova.instance_types') + + +def create(name, memory, vcpus, local_gb, flavorid, swap=0, + rxtx_quota=0, rxtx_cap=0): + """Creates instance types / flavors + arguments: name memory vcpus local_gb flavorid swap rxtx_quota rxtx_cap + """ + for option in [memory, vcpus, local_gb, flavorid]: + try: + int(option) + except ValueError: + raise exception.InvalidInputException( + _("create arguments must be positive integers")) + if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0): + raise exception.InvalidInputException( + _("create arguments must be positive integers")) + + try: + db.instance_type_create( + context.get_admin_context(), + dict(name=name, + memory_mb=memory, + vcpus=vcpus, + local_gb=local_gb, + flavorid=flavorid, + swap=swap, + rxtx_quota=rxtx_quota, + rxtx_cap=rxtx_cap)) + except exception.DBError, e: + LOG.exception(_('DB error: %s' % e)) + raise exception.ApiError(_("Cannot create instance type: %s" % name)) + + +def destroy(name): + """Marks instance types / flavors as deleted + arguments: name""" + if name == None: + raise exception.InvalidInputException(_("No instance type specified")) + else: + try: + db.instance_type_destroy(context.get_admin_context(), name) + except exception.NotFound: + LOG.exception(_('Instance type %s not found for deletion' % name)) + raise exception.ApiError(_("Unknown instance type: %s" % name)) + + +def purge(name): + """Removes instance types / flavors from database + arguments: name""" + if name == None: + raise exception.InvalidInputException(_("No instance type specified")) + else: + try: + db.instance_type_purge(context.get_admin_context(), name) + except exception.NotFound: + LOG.exception(_('Instance type %s not found for purge' % name)) + raise exception.ApiError(_("Unknown instance type: %s" % name)) + + +def get_all_types(inactive=0): + """Retrieves non-deleted instance_types. + Pass true as argument if you want deleted instance types returned also.""" + return db.instance_type_get_all(context.get_admin_context(), inactive) + + +def get_all_flavors(): + """retrieves non-deleted flavors. alias for instance_types.get_all_types(). 
+ Pass true as argument if you want deleted instance types returned also.""" + return get_all_types(inactive) + + +def get_instance_type(name): + """Retrieves single instance type by name""" + if name is None: + name = FLAGS.default_instance_type + try: + ctxt = context.get_admin_context() + inst_type = db.instance_type_get_by_name(ctxt, name) + return inst_type + except exception.DBError: + raise exception.ApiError(_("Unknown instance type: %s") % name) def get_by_type(instance_type): - """Build instance data structure and save it to the data store.""" + """Retrieve instance type name""" if instance_type is None: return FLAGS.default_instance_type - if instance_type not in INSTANCE_TYPES: - raise exception.ApiError(_("Unknown instance type: %s"), - instance_type) - return instance_type + + try: + ctxt = context.get_admin_context() + inst_type = db.instance_type_get_by_name(ctxt, instance_type) + return inst_type['name'] + except exception.DBError, e: + LOG.exception(_('DB error: %s') % e) + raise exception.ApiError(_("Unknown instance type: %s") % + instance_type) def get_by_flavor_id(flavor_id): - for instance_type, details in INSTANCE_TYPES.iteritems(): - if details['flavorid'] == flavor_id: - return instance_type - return FLAGS.default_instance_type + """Retrieve instance type's name by flavor_id""" + if flavor_id is None: + return FLAGS.default_instance_type + try: + ctxt = context.get_admin_context() + flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id) + return flavor['name'] + except exception.DBError, e: + LOG.exception(_('DB error: %s') % e) + raise exception.ApiError(_("Unknown flavor: %s") % flavor_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f4418af26..b35216dd3 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -34,6 +34,7 @@ terminating it. :func:`nova.utils.import_object` """ +import base64 import datetime import random import string @@ -127,10 +128,10 @@ class ComputeManager(manager.Manager): info = self.driver.get_info(instance_ref['name']) state = info['state'] except exception.NotFound: - state = power_state.NOSTATE + state = power_state.FAILED self.db.instance_set_state(context, instance_id, state) - def get_console_topic(self, context, **_kwargs): + def get_console_topic(self, context, **kwargs): """Retrieves the console host for a project on this host Currently this is just set in the flags for each compute host.""" @@ -139,7 +140,7 @@ class ComputeManager(manager.Manager): FLAGS.console_topic, FLAGS.console_host) - def get_network_topic(self, context, **_kwargs): + def get_network_topic(self, context, **kwargs): """Retrieves the network host for a project on this host""" # TODO(vish): This method should be memoized.
This will make # the call to get_network_host cheaper, so that @@ -158,21 +159,22 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def refresh_security_group_rules(self, context, - security_group_id, **_kwargs): + security_group_id, **kwargs): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_security_group_rules(security_group_id) @exception.wrap_exception def refresh_security_group_members(self, context, - security_group_id, **_kwargs): + security_group_id, **kwargs): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception - def run_instance(self, context, instance_id, **_kwargs): + def run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) + instance_ref.onset_files = kwargs.get('onset_files', []) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, @@ -323,28 +325,43 @@ class ComputeManager(manager.Manager): """Set the root/admin password for an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - if instance_ref['state'] != power_state.RUNNING: - logging.warn('trying to reset the password on a non-running ' - 'instance: %s (state: %s expected: %s)', - instance_ref['id'], - instance_ref['state'], - power_state.RUNNING) - - logging.debug('instance %s: setting admin password', + instance_id = instance_ref['id'] + instance_state = instance_ref['state'] + expected_state = power_state.RUNNING + if instance_state != expected_state: + LOG.warn(_('trying to reset the password on a non-running ' + 'instance: %(instance_id)s (state: %(instance_state)s ' + 'expected: %(expected_state)s)') % locals()) + LOG.audit(_('instance %s: setting admin password'), instance_ref['name']) if new_pass is None: # Generate a random password - new_pass = self._generate_password(FLAGS.password_length) - + new_pass = utils.generate_password(FLAGS.password_length) self.driver.set_admin_password(instance_ref, new_pass) self._update_state(context, instance_id) - def _generate_password(self, length=20): - """Generate a random sequence of letters and digits - to be used as a password. - """ - chrs = string.letters + string.digits - return "".join([random.choice(chrs) for i in xrange(length)]) + @exception.wrap_exception + @checks_instance_lock + def inject_file(self, context, instance_id, path, file_contents): + """Write a file to the specified path on an instance on this server""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + instance_id = instance_ref['id'] + instance_state = instance_ref['state'] + expected_state = power_state.RUNNING + if instance_state != expected_state: + LOG.warn(_('trying to inject a file into a non-running ' + 'instance: %(instance_id)s (state: %(instance_state)s ' + 'expected: %(expected_state)s)') % locals()) + # Files/paths *should* be base64-encoded at this point, but + # double-check to make sure. 
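# NOTE(editor): utils.ensure_b64_encoding() is called below but not shown
# in this diff; a plausible implementation is sketched here purely as an
# assumption about its contract (return the value untouched if it already
# decodes as base64, otherwise encode it). Note this can only ever be a
# heuristic, since many plain strings also decode cleanly:

import base64
import binascii


def ensure_b64_encoding(val):
    try:
        base64.b64decode(val)
        return val
    except (TypeError, binascii.Error):
        return base64.b64encode(val)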
+ b64_path = utils.ensure_b64_encoding(path) + b64_contents = utils.ensure_b64_encoding(file_contents) + plain_path = base64.b64decode(b64_path) + nm = instance_ref['name'] + msg = _('instance %(nm)s: injecting file to %(plain_path)s') % locals() + LOG.audit(msg) + self.driver.inject_file(instance_ref, b64_path, b64_contents) @exception.wrap_exception @checks_instance_lock @@ -353,12 +370,19 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: rescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'rescuing') + self.db.instance_set_state( + context, + instance_id, + power_state.NOSTATE, + 'rescuing') self.network_manager.setup_compute_network(context, instance_id) - self.driver.rescue(instance_ref) + self.driver.rescue( + instance_ref, + lambda result: self._update_state_callback( + self, + context, + instance_id, + result)) self._update_state(context, instance_id) @exception.wrap_exception @@ -368,11 +392,18 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'unrescuing') - self.driver.unrescue(instance_ref) + self.db.instance_set_state( + context, + instance_id, + power_state.NOSTATE, + 'unrescuing') + self.driver.unrescue( + instance_ref, + lambda result: self._update_state_callback( + self, + context, + instance_id, + result)) self._update_state(context, instance_id) @staticmethod @@ -382,6 +413,110 @@ class ComputeManager(manager.Manager): @exception.wrap_exception @checks_instance_lock + def confirm_resize(self, context, instance_id, migration_id): + """Destroys the source instance""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + migration_ref = self.db.migration_get(context, migration_id) + self.driver.destroy(instance_ref) + + @exception.wrap_exception + @checks_instance_lock + def revert_resize(self, context, instance_id, migration_id): + """Destroys the new instance on the destination machine, + reverts the model changes, and powers on the old + instance on the source machine""" + instance_ref = self.db.instance_get(context, instance_id) + migration_ref = self.db.migration_get(context, migration_id) + + #TODO(mdietz): we may want to split these into separate methods. 
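# NOTE(editor): for reference, the 'status' column of the migrations
# record moves through a small state machine across the resize methods in
# this file and in compute.api; summarized here as a plain dict (an
# editorial summary, not code from the tree):

MIGRATION_STATUS_BY_STEP = {
    'prep_resize': 'pre-migrating',
    'resize_instance': ('migrating', 'post-migrating'),
    'finish_resize': 'finished',
    'confirm_resize': 'confirmed',   # API layer, after user confirmation
    'revert_resize': 'reverted',     # source compute, on rollback
}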
+ if migration_ref['source_compute'] == FLAGS.host: + self.driver._start(instance_ref) + self.db.migration_update(context, migration_id, + {'status': 'reverted'}) + else: + self.driver.destroy(instance_ref) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + instance_ref['host']) + rpc.cast(context, topic, + {'method': 'revert_resize', + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, }, + }) + + @exception.wrap_exception + @checks_instance_lock + def prep_resize(self, context, instance_id): + """Initiates the process of moving a running instance to another + host, possibly changing the RAM and disk size in the process""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + if instance_ref['host'] == FLAGS.host: + raise exception.Error(_( + 'Migration error: destination same as source!')) + + migration_ref = self.db.migration_create(context, + {'instance_id': instance_id, + 'source_compute': instance_ref['host'], + 'dest_compute': FLAGS.host, + 'dest_host': self.driver.get_host_ip_addr(), + 'status': 'pre-migrating'}) + LOG.audit(_('instance %s: migrating'), instance_id, + context=context) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + instance_ref['host']) + rpc.cast(context, topic, + {'method': 'resize_instance', + 'args': { + 'migration_id': migration_ref['id'], + 'instance_id': instance_id, }, + }) + + @exception.wrap_exception + @checks_instance_lock + def resize_instance(self, context, instance_id, migration_id): + """Starts the migration of a running instance to another host""" + migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get(context, instance_id) + self.db.migration_update(context, migration_id, + {'status': 'migrating', }) + + disk_info = self.driver.migrate_disk_and_power_off(instance_ref, + migration_ref['dest_host']) + self.db.migration_update(context, migration_id, + {'status': 'post-migrating', }) + + #TODO(mdietz): This is where we would update the VM record + #after resizing + service = self.db.service_get_by_host_and_topic(context, + migration_ref['dest_compute'], FLAGS.compute_topic) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, + migration_ref['dest_compute']) + rpc.cast(context, topic, + {'method': 'finish_resize', + 'args': { + 'migration_id': migration_id, + 'instance_id': instance_id, + 'disk_info': disk_info, }, + }) + + @exception.wrap_exception + @checks_instance_lock + def finish_resize(self, context, instance_id, migration_id, disk_info): + """Completes the migration process by setting up the newly transferred + disk and turning on the instance on its new host machine""" + migration_ref = self.db.migration_get(context, migration_id) + instance_ref = self.db.instance_get(context, + migration_ref['instance_id']) + + self.driver.finish_resize(instance_ref, disk_info) + + self.db.migration_update(context, migration_id, + {'status': 'finished', }) + + @exception.wrap_exception + @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this server.""" context = context.elevated() @@ -498,6 +633,30 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) return instance_ref['locked'] + @checks_instance_lock + def reset_network(self, context, instance_id): + """ + Reset networking on the instance.
+ + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + LOG.debug(_('instance %s: reset network'), instance_id, + context=context) + self.driver.reset_network(instance_ref) + + @checks_instance_lock + def inject_network_info(self, context, instance_id): + """ + Inject network info for the instance. + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + LOG.debug(_('instance %s: inject network info'), instance_id, + context=context) + self.driver.inject_network_info(instance_ref) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" @@ -511,7 +670,7 @@ class ComputeManager(manager.Manager): def get_ajax_console(self, context, instance_id): """Return connection information for an ajax console""" context = context.elevated() - logging.debug(_("instance %s: getting ajax console"), instance_id) + LOG.debug(_("instance %s: getting ajax console"), instance_id) instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_ajax_console(instance_ref) diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py index 37039d2ec..adfc2dff0 100644 --- a/nova/compute/power_state.py +++ b/nova/compute/power_state.py @@ -27,6 +27,7 @@ SHUTDOWN = 0x04 SHUTOFF = 0x05 CRASHED = 0x06 SUSPENDED = 0x07 +FAILED = 0x08 def name(code): @@ -38,5 +39,6 @@ def name(code): SHUTDOWN: 'shutdown', SHUTOFF: 'shutdown', CRASHED: 'crashed', - SUSPENDED: 'suspended'} + SUSPENDED: 'suspended', + FAILED: 'failed to spawn'} return d[code] diff --git a/nova/console/manager.py b/nova/console/manager.py index 5697e7cb1..57c75cf4f 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -20,11 +20,11 @@ Console Proxy Service """ import functools -import logging import socket from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils diff --git a/nova/console/xvp.py b/nova/console/xvp.py index ee66dac46..68d8c8565 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -20,7 +20,6 @@ XVP (Xenserver VNC Proxy) driver. 
""" import fcntl -import logging import os import signal import subprocess @@ -31,6 +30,7 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils flags.DEFINE_string('console_xvp_conf_template', @@ -133,10 +133,10 @@ class XVPConsoleProxy(object): return logging.debug(_("Starting xvp")) try: - utils.execute('xvp -p %s -c %s -l %s' % - (FLAGS.console_xvp_pid, - FLAGS.console_xvp_conf, - FLAGS.console_xvp_log)) + utils.execute('xvp', + '-p', FLAGS.console_xvp_pid, + '-c', FLAGS.console_xvp_conf, + '-l', FLAGS.console_xvp_log) except exception.ProcessExecutionError, err: logging.error(_("Error starting xvp: %s") % err) @@ -190,5 +190,5 @@ class XVPConsoleProxy(object): flag = '-x' #xvp will blow up on passwords that are too long (mdragon) password = password[:maxlen] - out, err = utils.execute('xvp %s' % flag, process_input=password) + out, err = utils.execute('xvp', flag, process_input=password) return out.strip() diff --git a/nova/context.py b/nova/context.py index f2669c9f1..0256bf448 100644 --- a/nova/context.py +++ b/nova/context.py @@ -28,7 +28,6 @@ from nova import utils class RequestContext(object): - def __init__(self, user, project, is_admin=None, read_deleted=False, remote_address=None, timestamp=None, request_id=None): if hasattr(user, 'id'): @@ -53,7 +52,7 @@ class RequestContext(object): self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: - timestamp = datetime.datetime.utcnow() + timestamp = utils.utcnow() if isinstance(timestamp, str) or isinstance(timestamp, unicode): timestamp = utils.parse_isotime(timestamp) self.timestamp = timestamp @@ -101,7 +100,7 @@ class RequestContext(object): return cls(**values) def elevated(self, read_deleted=False): - """Return a version of this context with admin flag set""" + """Return a version of this context with admin flag set.""" return RequestContext(self.user_id, self.project_id, True, diff --git a/nova/crypto.py b/nova/crypto.py index a34b940f5..2a8d4abca 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -105,8 +105,10 @@ def generate_key_pair(bits=1024): tmpdir = tempfile.mkdtemp() keyfile = os.path.join(tmpdir, 'temp') - utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile)) - (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile)) + utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '', + '-f', keyfile) + (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', + '%s.pub' % (keyfile)) fingerprint = out.split(' ')[1] private_key = open(keyfile).read() public_key = open(keyfile + '.pub').read() @@ -118,7 +120,8 @@ def generate_key_pair(bits=1024): # bio = M2Crypto.BIO.MemoryBuffer() # key.save_pub_key_bio(bio) # public_key = bio.read() - # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key) + # public_key, err = execute('ssh-keygen', '-y', '-f', + # '/dev/stdin', private_key) return (private_key, public_key, fingerprint) @@ -143,9 +146,10 @@ def revoke_cert(project_id, file_name): start = os.getcwd() os.chdir(ca_folder(project_id)) # NOTE(vish): potential race condition here - utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name) - utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" % - FLAGS.crl_file) + utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke', + file_name) + utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf', + '-out', FLAGS.crl_file) os.chdir(start) @@ -193,9 +197,9 @@ def 
generate_x509_cert(user_id, project_id, bits=1024):
     tmpdir = tempfile.mkdtemp()
     keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
     csrfile = os.path.join(tmpdir, 'temp.csr')
-    utils.execute("openssl genrsa -out %s %s" % (keyfile, bits))
-    utils.execute("openssl req -new -key %s -out %s -batch -subj %s" %
-                  (keyfile, csrfile, subject))
+    utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
+    utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile,
+                  '-batch', '-subj', subject)
     private_key = open(keyfile).read()
     csr = open(csrfile).read()
     shutil.rmtree(tmpdir)
@@ -212,8 +216,8 @@ def _ensure_project_folder(project_id):
     if not os.path.exists(ca_path(project_id)):
         start = os.getcwd()
         os.chdir(ca_folder())
-        utils.execute("sh geninter.sh %s %s" %
-                      (project_id, _project_cert_subject(project_id)))
+        utils.execute('sh', 'geninter.sh', project_id,
+                      _project_cert_subject(project_id))
         os.chdir(start)
@@ -228,8 +232,8 @@ def generate_vpn_files(project_id):
     start = os.getcwd()
     os.chdir(ca_folder())
     # TODO(vish): the shell scripts could all be done in python
-    utils.execute("sh genvpn.sh %s %s" %
-                  (project_id, _vpn_cert_subject(project_id)))
+    utils.execute('sh', 'genvpn.sh',
+                  project_id, _vpn_cert_subject(project_id))
     with open(csr_fn, "r") as csrfile:
         csr_text = csrfile.read()
     (serial, signed_csr) = sign_csr(csr_text, project_id)
@@ -259,9 +263,10 @@ def _sign_csr(csr_text, ca_folder):
     start = os.getcwd()
     # Change working dir to CA
     os.chdir(ca_folder)
-    utils.execute("openssl ca -batch -out %s -config "
-                  "./openssl.cnf -infiles %s" % (outbound, inbound))
-    out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound)
+    utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
+                  './openssl.cnf', '-infiles', inbound)
+    out, _err = utils.execute('openssl', 'x509', '-in', outbound,
+                              '-serial', '-noout')
     serial = out.rpartition("=")[2]
     os.chdir(start)
     with open(outbound, "r") as crtfile:
diff --git a/nova/db/api.py b/nova/db/api.py
index 789cb8ebb..aa86f0af1 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -80,13 +80,18 @@ def service_destroy(context, instance_id):
 def service_get(context, service_id):
-    """Get an service or raise if it does not exist."""
+    """Get a service or raise if it does not exist."""
     return IMPL.service_get(context, service_id)
 
 
+def service_get_by_host_and_topic(context, host, topic):
+    """Get a service by the host it's on and the topic it listens to."""
+    return IMPL.service_get_by_host_and_topic(context, host, topic)
+
+
 def service_get_all(context, disabled=False):
-    """Get all service."""
-    return IMPL.service_get_all(context, None, disabled)
+    """Get all services."""
+    return IMPL.service_get_all(context, disabled)
 
 
 def service_get_all_by_topic(context, topic):
@@ -254,6 +259,28 @@ def floating_ip_get_by_address(context, address):
 ####################
 
 
+def migration_update(context, id, values):
+    """Update a migration instance."""
+    return IMPL.migration_update(context, id, values)
+
+
+def migration_create(context, values):
+    """Create a migration record."""
+    return IMPL.migration_create(context, values)
+
+
+def migration_get(context, migration_id):
+    """Find a migration by its id."""
+    return IMPL.migration_get(context, migration_id)
+
+
+def migration_get_by_instance_and_status(context, instance_id, status):
+    """Find a migration by the id of the instance it is migrating."""
+    return IMPL.migration_get_by_instance_and_status(context, instance_id,
+                                                     status)
+
+####################
+
 def fixed_ip_associate(context, address, instance_id):
    """Associate fixed ip to instance.
@@ -288,11 +315,21 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
     return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
 
 
+def fixed_ip_get_all(context):
+    """Get all defined fixed ips."""
+    return IMPL.fixed_ip_get_all(context)
+
+
 def fixed_ip_get_by_address(context, address):
     """Get a fixed ip by address or raise if it does not exist."""
     return IMPL.fixed_ip_get_by_address(context, address)
 
 
+def fixed_ip_get_all_by_instance(context, instance_id):
+    """Get fixed ips by instance or raise if none exist."""
+    return IMPL.fixed_ip_get_all_by_instance(context, instance_id)
+
+
 def fixed_ip_get_instance(context, address):
     """Get an instance for a fixed ip by address."""
     return IMPL.fixed_ip_get_instance(context, address)
@@ -480,6 +517,13 @@ def network_create_safe(context, values):
     return IMPL.network_create_safe(context, values)
 
 
+def network_delete_safe(context, network_id):
+    """Delete network with key network_id.
+    This method assumes that the network is not associated with any project.
+    """
+    return IMPL.network_delete_safe(context, network_id)
+
+
 def network_create_fixed_ips(context, network_id, num_vpn_clients):
     """Create the ips for the network, reserving specified ips."""
     return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
@@ -500,6 +544,11 @@ def network_get(context, network_id):
     return IMPL.network_get(context, network_id)
 
 
+def network_get_all(context):
+    """Return all defined networks."""
+    return IMPL.network_get_all(context)
+
+
 # pylint: disable-msg=C0103
 def network_get_associated_fixed_ips(context, network_id):
     """Get all network's ips that have been associated."""
@@ -511,11 +560,21 @@ def network_get_by_bridge(context, bridge):
     return IMPL.network_get_by_bridge(context, bridge)
 
 
+def network_get_by_cidr(context, cidr):
+    """Get a network by cidr or raise if it does not exist."""
+    return IMPL.network_get_by_cidr(context, cidr)
+
+
 def network_get_by_instance(context, instance_id):
     """Get a network by instance id or raise if it does not exist."""
     return IMPL.network_get_by_instance(context, instance_id)
 
 
+def network_get_all_by_instance(context, instance_id):
+    """Get all networks by instance id or raise if none exist."""
+    return IMPL.network_get_all_by_instance(context, instance_id)
+
+
 def network_get_index(context, network_id):
     """Get non-conflicting index for network."""
     return IMPL.network_get_index(context, network_id)
@@ -556,7 +615,7 @@ def project_get_network(context, project_id, associate=True):
 
     """
-    return IMPL.project_get_network(context, project_id)
+    return IMPL.project_get_network(context, project_id, associate)
 
 
 def project_get_network_v6(context, project_id):
@@ -610,19 +669,24 @@ def iscsi_target_create_safe(context, values):
 ###############
 
 
-def auth_destroy_token(context, token):
+def auth_token_destroy(context, token_id):
     """Destroy an auth token."""
-    return IMPL.auth_destroy_token(context, token)
+    return IMPL.auth_token_destroy(context, token_id)
 
 
-def auth_get_token(context, token_hash):
+def auth_token_get(context, token_hash):
     """Retrieves a token given the hash representing it."""
-    return IMPL.auth_get_token(context, token_hash)
+    return IMPL.auth_token_get(context, token_hash)
 
 
-def auth_create_token(context, token):
+def auth_token_update(context, token_hash, values):
+    """Updates a token given the hash representing it."""
+    return IMPL.auth_token_update(context, token_hash, values)
+
+
+def auth_token_create(context, token):
     """Creates a new token."""
-    return IMPL.auth_create_token(context, token)
+    return IMPL.auth_token_create(context, token)
 
 
 ###################
@@ -980,3 +1044,66 @@ def console_get_all_by_instance(context, instance_id):
 def console_get(context, console_id, instance_id=None):
     """Get a specific console (possibly on a given instance)."""
     return IMPL.console_get(context, console_id, instance_id)
+
+
+##################
+
+
+def instance_type_create(context, values):
+    """Create a new instance type."""
+    return IMPL.instance_type_create(context, values)
+
+
+def instance_type_get_all(context, inactive=0):
+    """Get all instance types."""
+    return IMPL.instance_type_get_all(context, inactive)
+
+
+def instance_type_get_by_name(context, name):
+    """Get instance type by name."""
+    return IMPL.instance_type_get_by_name(context, name)
+
+
+def instance_type_get_by_flavor_id(context, id):
+    """Get instance type by flavor id."""
+    return IMPL.instance_type_get_by_flavor_id(context, id)
+
+
+def instance_type_destroy(context, name):
+    """Delete an instance type."""
+    return IMPL.instance_type_destroy(context, name)
+
+
+def instance_type_purge(context, name):
+    """Purges (removes) an instance type from the DB.
+    Use instance_type_destroy for most cases.
+    """
+    return IMPL.instance_type_purge(context, name)
+
+
+####################
+
+
+def zone_create(context, values):
+    """Create a new child Zone entry."""
+    return IMPL.zone_create(context, values)
+
+
+def zone_update(context, zone_id, values):
+    """Update a child Zone entry."""
+    return IMPL.zone_update(context, zone_id, values)
+
+
+def zone_delete(context, zone_id):
+    """Delete a child Zone."""
+    return IMPL.zone_delete(context, zone_id)
+
+
+def zone_get(context, zone_id):
+    """Get a specific child Zone."""
+    return IMPL.zone_get(context, zone_id)
+
+
+def zone_get_all(context):
+    """Get all child Zones."""
+    return IMPL.zone_get_all(context)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 85250d56e..3e94082df 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -136,15 +136,12 @@ def service_get(context, service_id, session=None):
 
 @require_admin_context
-def service_get_all(context, session=None, disabled=False):
-    if not session:
-        session = get_session()
-
-    result = session.query(models.Service).\
+def service_get_all(context, disabled=False):
+    session = get_session()
+    return session.query(models.Service).\
                    filter_by(deleted=can_read_deleted(context)).\
                    filter_by(disabled=disabled).\
                    all()
-    return result
 
 
 @require_admin_context
@@ -158,6 +155,17 @@ def service_get_all_by_topic(context, topic):
 
 @require_admin_context
+def service_get_by_host_and_topic(context, host, topic):
+    session = get_session()
+    return session.query(models.Service).\
+                   filter_by(deleted=False).\
+                   filter_by(disabled=False).\
+                   filter_by(host=host).\
+                   filter_by(topic=topic).\
+                   first()
+
+
+@require_admin_context
 def service_get_all_by_host(context, host):
     session = get_session()
     return session.query(models.Service).\
@@ -579,10 +587,21 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
                              'AND instance_id IS NOT NULL '
                              'AND allocated = 0',
                              {'host': host,
-                              'time': time.isoformat()})
+                              'time': time})
     return result.rowcount
 
 
+@require_admin_context
+def fixed_ip_get_all(context, session=None):
+    if not session:
+        session = get_session()
+    result = session.query(models.FixedIp).all()
+    if not result:
+        raise exception.NotFound(_('No fixed ips defined'))
+
+    return result
+
+
 @require_context
 def fixed_ip_get_by_address(context, address, session=None):
     if
not session: @@ -609,6 +628,17 @@ def fixed_ip_get_instance(context, address): @require_context +def fixed_ip_get_all_by_instance(context, instance_id): + session = get_session() + rv = session.query(models.FixedIp).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False) + if not rv: + raise exception.NotFound(_('No address for instance %s') % instance_id) + return rv + + +@require_context def fixed_ip_get_instance_v6(context, address): session = get_session() mac = utils.to_mac(address) @@ -693,6 +723,7 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -701,6 +732,7 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ + options(joinedload('metadata')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ @@ -719,6 +751,7 @@ def instance_get_all(context): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -729,6 +762,7 @@ def instance_get_all_by_user(context, user_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(user_id=user_id).\ all() @@ -740,6 +774,7 @@ def instance_get_all_by_host(context, host): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -753,6 +788,7 @@ def instance_get_all_by_project(context, project_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -766,6 +802,7 @@ def instance_get_all_by_reservation(context, reservation_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(reservation_id=reservation_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -773,6 +810,7 @@ def instance_get_all_by_reservation(context, reservation_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(project_id=context.project_id).\ filter_by(reservation_id=reservation_id).\ filter_by(deleted=False).\ @@ -1017,8 +1055,18 @@ def network_create_safe(context, values): @require_admin_context +def network_delete_safe(context, network_id): + session = get_session() + with session.begin(): + network_ref = network_get(context, network_id=network_id, \ + session=session) + session.delete(network_ref) + + +@require_admin_context def network_disassociate(context, network_id): - 
network_update(context, network_id, {'project_id': None}) + network_update(context, network_id, {'project_id': None, + 'host': None}) @require_admin_context @@ -1050,6 +1098,15 @@ def network_get(context, network_id, session=None): return result +@require_admin_context +def network_get_all(context): + session = get_session() + result = session.query(models.Network) + if not result: + raise exception.NotFound(_('No networks defined')) + return result + + # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable-msg=C0103 @@ -1080,6 +1137,18 @@ def network_get_by_bridge(context, bridge): @require_admin_context +def network_get_by_cidr(context, cidr): + session = get_session() + result = session.query(models.Network).\ + filter_by(cidr=cidr).first() + + if not result: + raise exception.NotFound(_('Network with cidr %s does not exist') % + cidr) + return result + + +@require_admin_context def network_get_by_instance(_context, instance_id): session = get_session() rv = session.query(models.Network).\ @@ -1094,6 +1163,19 @@ def network_get_by_instance(_context, instance_id): @require_admin_context +def network_get_all_by_instance(_context, instance_id): + session = get_session() + rv = session.query(models.Network).\ + filter_by(deleted=False).\ + join(models.Network.fixed_ips).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False) + if not rv: + raise exception.NotFound(_('No network for instance %s') % instance_id) + return rv + + +@require_admin_context def network_set_host(context, network_id, host_id): session = get_session() with session.begin(): @@ -1212,16 +1294,20 @@ def iscsi_target_create_safe(context, values): @require_admin_context -def auth_destroy_token(_context, token): +def auth_token_destroy(context, token_id): session = get_session() - session.delete(token) + with session.begin(): + token_ref = auth_token_get(context, token_id, session=session) + token_ref.delete(session=session) @require_admin_context -def auth_get_token(_context, token_hash): - session = get_session() +def auth_token_get(context, token_hash, session=None): + if session is None: + session = get_session() tk = session.query(models.AuthToken).\ filter_by(token_hash=token_hash).\ + filter_by(deleted=can_read_deleted(context)).\ first() if not tk: raise exception.NotFound(_('Token %s does not exist') % token_hash) @@ -1229,7 +1315,16 @@ def auth_get_token(_context, token_hash): @require_admin_context -def auth_create_token(_context, token): +def auth_token_update(context, token_hash, values): + session = get_session() + with session.begin(): + token_ref = auth_token_get(context, token_hash, session=session) + token_ref.update(values) + token_ref.save(session=session) + + +@require_admin_context +def auth_token_create(_context, token): tk = models.AuthToken() tk.update(token) tk.save() @@ -1909,6 +2004,51 @@ def host_get_networks(context, host): all() +################### + + +@require_admin_context +def migration_create(context, values): + migration = models.Migration() + migration.update(values) + migration.save() + return migration + + +@require_admin_context +def migration_update(context, id, values): + session = get_session() + with session.begin(): + migration = migration_get(context, id, session=session) + migration.update(values) + migration.save(session=session) + return migration + + +@require_admin_context +def migration_get(context, id, session=None): + if not session: + session = get_session() + result = 
session.query(models.Migration).\
+                     filter_by(id=id).first()
+    if not result:
+        raise exception.NotFound(_("No migration found with id %s")
+                % id)
+    return result
+
+
+@require_admin_context
+def migration_get_by_instance_and_status(context, instance_id, status):
+    session = get_session()
+    result = session.query(models.Migration).\
+                     filter_by(instance_id=instance_id).\
+                     filter_by(status=status).first()
+    if not result:
+        raise exception.NotFound(_("No migration found with instance id %s")
+                % instance_id)
+    return result
+
+
 ##################
@@ -2008,3 +2148,139 @@ def console_get(context, console_id, instance_id=None):
         raise exception.NotFound(_("No console with id %(console_id)s"
                                    " %(idesc)s") % locals())
     return result
+
+
+##################
+
+
+@require_admin_context
+def instance_type_create(_context, values):
+    try:
+        instance_type_ref = models.InstanceTypes()
+        instance_type_ref.update(values)
+        instance_type_ref.save()
+    except:
+        raise exception.DBError
+    return instance_type_ref
+
+
+@require_context
+def instance_type_get_all(context, inactive=0):
+    """
+    Returns a dict describing all instance_types with name as key.
+    """
+    session = get_session()
+    if inactive:
+        inst_types = session.query(models.InstanceTypes).\
+                             order_by("name").\
+                             all()
+    else:
+        inst_types = session.query(models.InstanceTypes).\
+                             filter_by(deleted=inactive).\
+                             order_by("name").\
+                             all()
+    if inst_types:
+        inst_dict = {}
+        for i in inst_types:
+            inst_dict[i['name']] = dict(i)
+        return inst_dict
+    else:
+        raise exception.NotFound
+
+
+@require_context
+def instance_type_get_by_name(context, name):
+    """Returns a dict describing a specific instance_type."""
+    session = get_session()
+    inst_type = session.query(models.InstanceTypes).\
+                        filter_by(name=name).\
+                        first()
+    if not inst_type:
+        raise exception.NotFound(_("No instance type with name %s") % name)
+    else:
+        return dict(inst_type)
+
+
+@require_context
+def instance_type_get_by_flavor_id(context, id):
+    """Returns a dict describing the instance_type with the given flavorid."""
+    session = get_session()
+    inst_type = session.query(models.InstanceTypes).\
+                        filter_by(flavorid=int(id)).\
+                        first()
+    if not inst_type:
+        raise exception.NotFound(_("No flavor with flavorid %s") % id)
+    else:
+        return dict(inst_type)
+
+
+@require_admin_context
+def instance_type_destroy(context, name):
+    """Marks a specific instance_type as deleted."""
+    session = get_session()
+    instance_type_ref = session.query(models.InstanceTypes).\
+                                filter_by(name=name)
+    records = instance_type_ref.update(dict(deleted=1))
+    if records == 0:
+        raise exception.NotFound
+    else:
+        return instance_type_ref
+
+
+@require_admin_context
+def instance_type_purge(context, name):
+    """Removes a specific instance_type from the DB.
+    Usually instance_type_destroy should be used instead.
+    """
+    session = get_session()
+    instance_type_ref = session.query(models.InstanceTypes).\
+                                filter_by(name=name)
+    records = instance_type_ref.delete()
+    if records == 0:
+        raise exception.NotFound
+    else:
+        return instance_type_ref
+
+
+####################
+
+
+@require_admin_context
+def zone_create(context, values):
+    zone = models.Zone()
+    zone.update(values)
+    zone.save()
+    return zone
+
+
+@require_admin_context
+def zone_update(context, zone_id, values):
+    session = get_session()
+    zone = session.query(models.Zone).filter_by(id=zone_id).first()
+    if not zone:
+        raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+    zone.update(values)
+    zone.save()
+    return zone
+
+
+@require_admin_context
+def zone_delete(context, zone_id):
+    session =
get_session() + with session.begin(): + session.execute('delete from zones ' + 'where id=:id', {'id': zone_id}) + + +@require_admin_context +def zone_get(context, zone_id): + session = get_session() + result = session.query(models.Zone).filter_by(id=zone_id).first() + if not result: + raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) + return result + + +@require_admin_context +def zone_get_all(context): + session = get_session() + return session.query(models.Zone).all() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py index 366944591..9e7ab3554 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -508,17 +508,19 @@ def upgrade(migrate_engine): # bind migrate_engine to your metadata meta.bind = migrate_engine - for table in (auth_tokens, export_devices, fixed_ips, floating_ips, - instances, key_pairs, networks, - projects, quotas, security_groups, security_group_inst_assoc, - security_group_rules, services, users, - user_project_association, user_project_role_association, - user_role_association, volumes): + tables = [auth_tokens, + instances, key_pairs, networks, fixed_ips, floating_ips, + quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, projects, + user_project_association, user_project_role_association, + user_role_association, volumes, export_devices] + for table in tables: try: table.create() except Exception: logging.info(repr(table)) logging.exception('Exception while creating table') + meta.drop_all(tables=tables) raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py index 699b837f8..413536a59 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -209,13 +209,16 @@ def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; # bind migrate_engine to your metadata meta.bind = migrate_engine - for table in (certificates, consoles, console_pools, instance_actions, - iscsi_targets): + + tables = [certificates, console_pools, consoles, instance_actions, + iscsi_targets] + for table in tables: try: table.create() except Exception: logging.info(repr(table)) logging.exception('Exception while creating table') + meta.drop_all(tables=tables) raise auth_tokens.c.user_id.alter(type=String(length=255, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py new file mode 100644 index 000000000..5ba7910f1 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
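The change to the Austin and Bexar migrations above introduces a pattern worth noting: tables are created one at a time, and if any creation fails the whole list is dropped again, so a partially applied upgrade does not leave the schema half-built on retry. A minimal standalone sketch of the same pattern (the table names here are hypothetical, not part of this commit):

    import logging

    from sqlalchemy import Column, Integer, MetaData, Table

    meta = MetaData()
    tables = [
        Table('example_a', meta, Column('id', Integer, primary_key=True)),
        Table('example_b', meta, Column('id', Integer, primary_key=True)),
    ]


    def upgrade(migrate_engine):
        meta.bind = migrate_engine
        for table in tables:
            try:
                table.create()
            except Exception:
                logging.exception('Exception while creating table %r', table)
                # checkfirst=True silently skips tables that never got created.
                meta.drop_all(tables=tables, checkfirst=True)
                raise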
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +networks = Table('networks', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# + + +# +# Tables to alter +# + +networks_label = Column( + 'label', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + networks.create_column(networks_label) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py new file mode 100644 index 000000000..ade981687 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py @@ -0,0 +1,61 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# +# New Tables +# +zones = Table('zones', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('api_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# Tables to alter +# + +# (none currently) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (zones, ): + try: + table.create() + except Exception: + logging.info(repr(table)) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py new file mode 100644 index 000000000..4cb07e0d8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +quotas = Table('quotas', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# + +instance_metadata_table = Table('instance_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +# +# New columns +# +quota_metadata_items = Column('metadata_items', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (instance_metadata_table, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + quotas.create_column(quota_metadata_items) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py new file mode 100644 index 000000000..705fc8ff3 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
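The stub-table comment above describes an idiom used throughout these new migration scripts: an existing table is redeclared with only its primary key, which is just enough metadata for ForeignKey references and create_column() calls to resolve without importing the real models. A hedged sketch of the idiom in isolation (the column name is hypothetical):

    from sqlalchemy import Column, Integer, MetaData, String, Table
    from migrate import *  # activates Table.create_column() and friends

    meta = MetaData()

    # Stub: just enough of the real 'instances' table for FK resolution.
    instances = Table('instances', meta,
                      Column('id', Integer(), primary_key=True,
                             nullable=False))

    example_note = Column('example_note', String(255))


    def upgrade(migrate_engine):
        meta.bind = migrate_engine
        # sqlalchemy-migrate adds create_column() to Table objects.
        instances.create_column(example_note)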
+# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# +# None + +# +# Tables to alter +# +# None + +# +# Columns to add to existing tables +# + +volumes_provider_location = Column('provider_location', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + +volumes_provider_auth = Column('provider_auth', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(volumes_provider_location) + volumes.create_column(volumes_provider_auth) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py new file mode 100644 index 000000000..427934d53 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py @@ -0,0 +1,90 @@ +# Copyright 2011 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +fixed_ips = Table( + "fixed_ips", + meta, + Column( + "id", + Integer(), + primary_key=True, + nullable=False)) + +# +# New Tables +# +# None + +# +# Tables to alter +# +# None + +# +# Columns to add to existing tables +# + +fixed_ips_addressV6 = Column( + "addressV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + +fixed_ips_netmaskV6 = Column( + "netmaskV6", + String( + length=3, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + +fixed_ips_gatewayV6 = Column( + "gatewayV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+
+    # Add columns to existing tables
+    fixed_ips.create_column(fixed_ips_addressV6)
+    fixed_ips.create_column(fixed_ips_netmaskV6)
+    fixed_ips.create_column(fixed_ips_gatewayV6)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
new file mode 100644
index 000000000..66609054e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import api
+from nova import db
+from nova import log as logging
+
+import datetime
+
+meta = MetaData()
+
+
+#
+# New Tables
+#
+instance_types = Table('instance_types', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('name',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None, unicode_error=None,
+                      _warn_on_bytestring=False),
+               unique=True),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('memory_mb', Integer(), nullable=False),
+        Column('vcpus', Integer(), nullable=False),
+        Column('local_gb', Integer(), nullable=False),
+        Column('flavorid', Integer(), nullable=False, unique=True),
+        Column('swap', Integer(), nullable=False, default=0),
+        Column('rxtx_quota', Integer(), nullable=False, default=0),
+        Column('rxtx_cap', Integer(), nullable=False, default=0))
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here
+    # Don't create your own engine; bind migrate_engine
+    # to your metadata
+    meta.bind = migrate_engine
+    try:
+        instance_types.create()
+    except Exception:
+        logging.info(repr(instance_types))
+        logging.exception('Exception while creating instance_types table')
+        raise
+
+    # Here are the old static instance types
+    INSTANCE_TYPES = {
+        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+    try:
+        i = instance_types.insert()
+        for name, values in INSTANCE_TYPES.iteritems():
+            # FIXME(kpepple) should we be seeding created_at / updated_at ?
+            # now = datetime.datetime.utcnow()
+            i.execute({'name': name, 'memory_mb': values["memory_mb"],
+                       'vcpus': values["vcpus"], 'deleted': 0,
+                       'local_gb': values["local_gb"],
+                       'flavorid': values["flavorid"]})
+    except Exception:
+        logging.info(repr(instance_types))
+        logging.exception('Exception while seeding instance_types table')
+        raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    for table in (instance_types, ):
+        table.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
new file mode 100644
index 000000000..4fda525f1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+        Column('id', Integer(), primary_key=True, nullable=False),
+        )
+
+#
+# New Tables
+#
+
+migrations = Table('migrations', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('source_compute', String(255)),
+        Column('dest_compute', String(255)),
+        Column('dest_host', String(255)),
+        Column('instance_id', Integer, ForeignKey('instances.id'),
+                nullable=True),
+        Column('status', String(255)),
+        )
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    for table in (migrations, ):
+        try:
+            table.create()
+        except Exception:
+            logging.info(repr(table))
+            logging.exception('Exception while creating table')
+            raise
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 2a13c5466..d9e303599 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -17,12 +17,22 @@
 # under the License.
 
 import os
+import sys
 
 from nova import flags
 
 import sqlalchemy
 from migrate.versioning import api as versioning_api
-from migrate.versioning import exceptions as versioning_exceptions
+
+try:
+    from migrate.versioning import exceptions as versioning_exceptions
+except ImportError:
+    try:
+        # python-migrate changed the location of exceptions after 1.6.3
+        # See LP Bug #717467
+        from migrate import exceptions as versioning_exceptions
+    except ImportError:
+        sys.exit(_("python-migrate is not installed.
Exiting.")) FLAGS = flags.FLAGS @@ -45,12 +55,12 @@ def db_version(): engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False) meta.reflect(bind=engine) try: - for table in ('auth_tokens', 'export_devices', 'fixed_ips', - 'floating_ips', 'instances', + for table in ('auth_tokens', 'zones', 'export_devices', + 'fixed_ips', 'floating_ips', 'instances', 'key_pairs', 'networks', 'projects', 'quotas', 'security_group_instance_association', 'security_group_rules', 'security_groups', - 'services', + 'services', 'migrations', 'users', 'user_project_association', 'user_project_role_association', 'user_role_association', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 7efb36c0e..6ef284e65 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -126,11 +126,16 @@ class Certificate(BASE, NovaBase): class Instance(BASE, NovaBase): """Represents a guest vm.""" __tablename__ = 'instances' + onset_files = [] + id = Column(Integer, primary_key=True, autoincrement=True) @property def name(self): - return FLAGS.instance_name_template % self.id + base_name = FLAGS.instance_name_template % self.id + if getattr(self, '_rescue', False): + base_name += "-rescue" + return base_name admin_pass = Column(String(255)) user_id = Column(String(255)) @@ -210,6 +215,20 @@ class InstanceActions(BASE, NovaBase): error = Column(Text) +class InstanceTypes(BASE, NovaBase): + """Represent possible instance_types or flavor of VM offered""" + __tablename__ = "instance_types" + id = Column(Integer, primary_key=True) + name = Column(String(255), unique=True) + memory_mb = Column(Integer) + vcpus = Column(Integer) + local_gb = Column(Integer) + flavorid = Column(Integer, unique=True) + swap = Column(Integer, nullable=False, default=0) + rxtx_quota = Column(Integer, nullable=False, default=0) + rxtx_cap = Column(Integer, nullable=False, default=0) + + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' @@ -243,6 +262,9 @@ class Volume(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + class Quota(BASE, NovaBase): """Represents quota overrides for a project.""" @@ -256,6 +278,7 @@ class Quota(BASE, NovaBase): volumes = Column(Integer) gigabytes = Column(Integer) floating_ips = Column(Integer) + metadata_items = Column(Integer) class ExportDevice(BASE, NovaBase): @@ -366,6 +389,18 @@ class KeyPair(BASE, NovaBase): public_key = Column(Text) +class Migration(BASE, NovaBase): + """Represents a running host-to-host migration.""" + __tablename__ = 'migrations' + id = Column(Integer, primary_key=True, nullable=False) + source_compute = Column(String(255)) + dest_compute = Column(String(255)) + dest_host = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + #TODO(_cerberus_): enum + status = Column(String(255)) + + class Network(BASE, NovaBase): """Represents a network.""" __tablename__ = 'networks' @@ -373,6 +408,7 @@ class Network(BASE, NovaBase): "vpn_public_port"), {'mysql_engine': 'InnoDB'}) id = Column(Integer, primary_key=True) + label = Column(String(255)) injected = Column(Boolean, default=False) cidr = Column(String(255), unique=True) @@ -432,6 +468,9 @@ class FixedIp(BASE, NovaBase): allocated = Column(Boolean, default=False) leased = Column(Boolean, default=False) reserved = Column(Boolean, default=False) + addressV6 = 
Column(String(255)) + netmaskV6 = Column(String(3)) + gatewayV6 = Column(String(255)) class User(BASE, NovaBase): @@ -535,6 +574,29 @@ class Console(BASE, NovaBase): pool = relationship(ConsolePool, backref=backref('consoles')) +class InstanceMetadata(BASE, NovaBase): + """Represents a metadata key/value pair for an instance""" + __tablename__ = 'instance_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, backref="metadata", + foreign_keys=instance_id, + primaryjoin='and_(' + 'InstanceMetadata.instance_id == Instance.id,' + 'InstanceMetadata.deleted == False)') + + +class Zone(BASE, NovaBase): + """Represents a child zone of this zone.""" + __tablename__ = 'zones' + id = Column(Integer, primary_key=True) + api_url = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + + def register_models(): """Register Models and create metadata. @@ -543,11 +605,12 @@ def register_models(): connection is lost and needs to be reestablished. """ from sqlalchemy import create_engine - models = (Service, Instance, InstanceActions, + models = (Service, Instance, InstanceActions, InstanceTypes, Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console) # , Image, Host + Project, Certificate, ConsolePool, Console, Zone, + InstanceMetadata, Migration) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index dc885f138..4a9a28f43 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -20,6 +20,7 @@ Session Handling for SQLAlchemy backend """ from sqlalchemy import create_engine +from sqlalchemy import pool from sqlalchemy.orm import sessionmaker from nova import exception @@ -37,9 +38,14 @@ def get_session(autocommit=True, expire_on_commit=False): global _MAKER if not _MAKER: if not _ENGINE: + kwargs = {'pool_recycle': FLAGS.sql_idle_timeout, + 'echo': False} + + if FLAGS.sql_connection.startswith('sqlite'): + kwargs['poolclass'] = pool.NullPool + _ENGINE = create_engine(FLAGS.sql_connection, - pool_recycle=FLAGS.sql_idle_timeout, - echo=False) + **kwargs) _MAKER = (sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit)) diff --git a/nova/exception.py b/nova/exception.py index 7d65bd6a5..93c5fe3d7 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -88,6 +88,10 @@ class InvalidInputException(Error): pass +class InvalidContentType(Error): + pass + + class TimeoutException(Error): pass diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index dd82a9366..a7dee8caf 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -48,7 +48,6 @@ class Exchange(object): nm = self.name LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)' ' %(message)s') % locals()) - routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: LOG.debug(_('Publishing to route %s'), f) diff --git a/nova/flags.py b/nova/flags.py index 43bc174d2..9123e9ac7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -160,9 +160,45 @@ class StrWrapper(object): raise KeyError(name) -FLAGS = FlagValues() -gflags.FLAGS = FLAGS -gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS) +# 
Copied from gflags with small mods to get the naming correct.
+# Originally gflags checks for the first module in the call chain that is
+# not gflags; we want the first module that is neither gflags nor this
+# module.
+def _GetCallingModule():
+    """Returns the name of the module that's calling into this module.
+
+    We generally use this function to get the name of the module calling a
+    DEFINE_foo... function.
+    """
+    # Walk down the stack to find the first globals dict that's not ours.
+    for depth in range(1, sys.getrecursionlimit()):
+        if not sys._getframe(depth).f_globals is globals():
+            module_name = __GetModuleName(sys._getframe(depth).f_globals)
+            if module_name == 'gflags':
+                continue
+            if module_name is not None:
+                return module_name
+    raise AssertionError("No module was found")
+
+
+# Copied from gflags because it is a private function
+def __GetModuleName(globals_dict):
+    """Given a globals dict, returns the name of the module that defines it.
+
+    Args:
+      globals_dict: A dictionary that should correspond to an environment
+        providing the values of the globals.
+
+    Returns:
+      A string (the name of the module) or None (if the module could not
+      be identified).
+    """
+    for name, module in sys.modules.iteritems():
+        if getattr(module, '__dict__', None) is globals_dict:
+            if name == '__main__':
+                return sys.argv[0]
+            return name
+    return None
 
 
 def _wrapper(func):
@@ -173,6 +209,11 @@
     return _wrapped
 
 
+FLAGS = FlagValues()
+gflags.FLAGS = FLAGS
+gflags._GetCallingModule = _GetCallingModule
+
+
 DEFINE = _wrapper(gflags.DEFINE)
 DEFINE_string = _wrapper(gflags.DEFINE_string)
 DEFINE_integer = _wrapper(gflags.DEFINE_integer)
@@ -185,8 +226,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
 DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
 DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
 DEFINE_flag = _wrapper(gflags.DEFINE_flag)
-
-
 HelpFlag = gflags.HelpFlag
 HelpshortFlag = gflags.HelpshortFlag
 HelpXMLFlag = gflags.HelpXMLFlag
@@ -208,7 +247,7 @@ def _get_my_ip():
         (addr, port) = csock.getsockname()
         csock.close()
         return addr
-    except socket.gaierror as ex:
+    except socket.error as ex:
         return "127.0.0.1"
@@ -282,12 +321,17 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
 DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
               "Top-level directory for maintaining nova's state")
+DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
+              "Directory for lock files")
+DEFINE_string('logdir', None, 'output to a per-service log file in the '
+              'named directory')
+DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
 DEFINE_string('sql_connection',
-              'sqlite:///$state_path/nova.sqlite',
+              'sqlite:///$state_path/$sqlite_db',
               'connection string for sql database')
-DEFINE_string('sql_idle_timeout',
-              '3600',
+DEFINE_integer('sql_idle_timeout',
+               3600,
               'timeout for idle sql database connections')
 DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
 DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
@@ -304,7 +348,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
               'Manager for scheduler')
 
 # The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
+DEFINE_string('image_service', 'nova.image.local.LocalImageService',
               'The service to use for retrieving and searching for images.')
 
@@ -312,3 +356,7 @@ DEFINE_string('host', socket.gethostname(),
 DEFINE_string('node_availability_zone', 'nova',
               'availability zone of this node')
+
+DEFINE_string('zone_name', 'nova', 'name of this zone')
+DEFINE_string('zone_capabilities', 'hypervisor:xenserver;os:linux',
+              'Key/Value tags which represent capabilities of this zone')
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 593c4bce6..15fca69b8 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -17,9 +17,8 @@
 """Implementation of an image service that uses Glance as the backend"""
 
 from __future__ import absolute_import
-import httplib
-import json
-import urlparse
+
+from glance.common import exception as glance_exception
 
 from nova import exception
 from nova import flags
@@ -53,31 +52,64 @@ class GlanceImageService(service.BaseImageService):
         """
         return self.client.get_images_detailed()
 
-    def show(self, context, id):
+    def show(self, context, image_id):
         """
         Returns a dict containing image data for the given opaque image id.
         """
-        image = self.client.get_image_meta(id)
-        if image:
-            return image
-        raise exception.NotFound
+        try:
+            image = self.client.get_image_meta(image_id)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        return image
 
-    def create(self, context, data):
+    def show_by_name(self, context, name):
+        """
+        Returns a dict containing image data for the given name.
+        """
+        # TODO(vish): replace this with more efficient call when glance
+        #             supports it.
+        images = self.detail(context)
+        image = None
+        for candidate in images:
+            if name == candidate.get('name'):
+                image = candidate
+                break
+        if image is None:
+            raise exception.NotFound
+        return image
+
+    def get(self, context, image_id, data):
+        """
+        Calls out to Glance for metadata and data and writes data.
+        """
+        try:
+            metadata, image_chunks = self.client.get_image(image_id)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        for chunk in image_chunks:
+            data.write(chunk)
+        return metadata
+
+    def create(self, context, metadata, data=None):
         """
         Store the image data and return the new image id.
 
         :raises AlreadyExists if the image already exist.
 
         """
-        return self.client.add_image(image_meta=data)
+        return self.client.add_image(metadata, data)
 
-    def update(self, context, image_id, data):
+    def update(self, context, image_id, metadata, data=None):
         """Replace the contents of the given image with the new data.
 
         :raises NotFound if the image does not exist.
 
         """
-        return self.client.update_image(image_id, data)
+        try:
+            result = self.client.update_image(image_id, metadata, data)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        return result
 
     def delete(self, context, image_id):
         """
@@ -86,7 +118,11 @@ class GlanceImageService(service.BaseImageService):
         :raises NotFound if the image does not exist.
 
         """
-        return self.client.delete_image(image_id)
+        try:
+            result = self.client.delete_image(image_id)
+        except glance_exception.NotFound:
+            raise exception.NotFound
+        return result
 
     def delete_all(self):
         """
diff --git a/nova/image/local.py b/nova/image/local.py
index f78b9aa89..c4ac3baaa 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -15,57 +15,110 @@
 # License for the specific language governing permissions and limitations
 # under the License.
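All of the reworked GlanceImageService methods above repeat the same try/except dance, translating the Glance client's NotFound into nova's exception.NotFound. A decorator could factor that out; the sketch below is a hypothetical refactoring, not part of this commit, and reuses the module's imports as shown in the diff:

    import functools

    from glance.common import exception as glance_exception

    from nova import exception


    def translate_not_found(fn):
        """Map the Glance client's NotFound onto nova's NotFound."""
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except glance_exception.NotFound:
                raise exception.NotFound
        return wrapper

    # Usage sketch:
    #
    #     @translate_not_found
    #     def show(self, context, image_id):
    #         return self.client.get_image_meta(image_id)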
-import cPickle as pickle +import json import os.path import random -import tempfile +import shutil +from nova import flags from nova import exception from nova.image import service -class LocalImageService(service.BaseImageService): +FLAGS = flags.FLAGS +flags.DEFINE_string('images_path', '$state_path/images', + 'path to decrypted images') + +class LocalImageService(service.BaseImageService): """Image service storing images to local disk. + It assumes that image_ids are integers. """ def __init__(self): - self._path = tempfile.mkdtemp() + self._path = FLAGS.images_path - def _path_to(self, image_id): - return os.path.join(self._path, str(image_id)) + def _path_to(self, image_id, fname='info.json'): + if fname: + return os.path.join(self._path, '%08x' % int(image_id), fname) + return os.path.join(self._path, '%08x' % int(image_id)) def _ids(self): """The list of all image ids.""" - return [int(i) for i in os.listdir(self._path)] + return [int(i, 16) for i in os.listdir(self._path)] def index(self, context): - return [dict(id=i['id'], name=i['name']) for i in self.detail(context)] + return [dict(image_id=i['id'], name=i.get('name')) + for i in self.detail(context)] def detail(self, context): - return [self.show(context, id) for id in self._ids()] + images = [] + for image_id in self._ids(): + try: + image = self.show(context, image_id) + images.append(image) + except exception.NotFound: + continue + return images + + def show(self, context, image_id): + try: + with open(self._path_to(image_id)) as metadata_file: + return json.load(metadata_file) + except (IOError, ValueError): + raise exception.NotFound - def show(self, context, id): + def show_by_name(self, context, name): + """Returns a dict containing image data for the given name.""" + # NOTE(vish): Not very efficient, but the local image service + # is for testing so it should be fine. 
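# NOTE(editor): to make the '%08x' scheme in _path_to() above concrete, a
# standalone sketch of the on-disk layout (the images_path value here is
# only an example):

import os


def example_paths(image_id, images_path='/var/lib/nova/images'):
    """Each image gets a zero-padded hex directory holding two files."""
    base = os.path.join(images_path, '%08x' % int(image_id))
    return {'metadata': os.path.join(base, 'info.json'),
            'data': os.path.join(base, 'image')}

# example_paths(7)['metadata'] == '/var/lib/nova/images/00000007/info.json',
# and _ids() inverts the directory name with int('00000007', 16) == 7.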
+        images = self.detail(context)
+        image = None
+        for candidate in images:
+            if name == candidate.get('name'):
+                image = candidate
+                break
+        if image is None:
+            raise exception.NotFound
+        return image
+
+    def get(self, context, image_id, data):
+        """Get image and metadata."""
         try:
-            return pickle.load(open(self._path_to(id)))
-        except IOError:
+            with open(self._path_to(image_id)) as metadata_file:
+                metadata = json.load(metadata_file)
+            with open(self._path_to(image_id, 'image')) as image_file:
+                shutil.copyfileobj(image_file, data)
+        except (IOError, ValueError):
             raise exception.NotFound
+        return metadata
 
-    def create(self, context, data):
-        """Store the image data and return the new image id."""
-        id = random.randint(0, 2 ** 31 - 1)
-        data['id'] = id
-        self.update(context, id, data)
-        return id
+    def create(self, context, metadata, data=None):
+        """Store the image data and return the new image."""
+        image_id = random.randint(0, 2 ** 31 - 1)
+        image_path = self._path_to(image_id, None)
+        if not os.path.exists(image_path):
+            os.mkdir(image_path)
+        return self.update(context, image_id, metadata, data)
 
-    def update(self, context, image_id, data):
+    def update(self, context, image_id, metadata, data=None):
         """Replace the contents of the given image with the new data."""
+        metadata['id'] = image_id
         try:
-            pickle.dump(data, open(self._path_to(image_id), 'w'))
-        except IOError:
+            if data:
+                location = self._path_to(image_id, 'image')
+                with open(location, 'w') as image_file:
+                    shutil.copyfileobj(data, image_file)
+                # NOTE(vish): update metadata similarly to glance
+                metadata['status'] = 'active'
+                metadata['location'] = location
+            with open(self._path_to(image_id), 'w') as metadata_file:
+                json.dump(metadata, metadata_file)
+        except (IOError, ValueError):
             raise exception.NotFound
+        return metadata
 
     def delete(self, context, image_id):
         """Delete the given image.
@@ -73,18 +126,11 @@ class LocalImageService(service.BaseImageService):
 
         """
         try:
-            os.unlink(self._path_to(image_id))
-        except IOError:
+            shutil.rmtree(self._path_to(image_id, None))
+        except (IOError, ValueError):
             raise exception.NotFound
 
     def delete_all(self):
         """Clears out all images in local directory."""
-        for id in self._ids():
-            os.unlink(self._path_to(id))
-
-    def delete_imagedir(self):
-        """Deletes the local directory.
-        Raises OSError if directory is not empty.
-
-        """
-        os.rmdir(self._path)
+        for image_id in self._ids():
+            shutil.rmtree(self._path_to(image_id, None))
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 08a40f191..85a2c651c 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -21,8 +21,13 @@
 Proxy AMI-related calls from the cloud controller, to the running
 objectstore service.
""" -import json -import urllib +import binascii +import eventlet +import os +import shutil +import tarfile +import tempfile +from xml.etree import ElementTree import boto.s3.connection @@ -31,74 +36,78 @@ from nova import flags from nova import utils from nova.auth import manager from nova.image import service +from nova.api.ec2 import ec2utils FLAGS = flags.FLAGS +flags.DEFINE_string('image_decryption_dir', '/tmp', + 'parent dir for tempdir used for image decryption') class S3ImageService(service.BaseImageService): + def __init__(self, service=None, *args, **kwargs): + if service == None: + service = utils.import_object(FLAGS.image_service) + self.service = service + self.service.__init__(*args, **kwargs) - def modify(self, context, image_id, operation): - self._conn(context).make_request( - method='POST', - bucket='_images', - query_args=self._qs({'image_id': image_id, - 'operation': operation})) - return True - - def update(self, context, image_id, attributes): - """update an image's attributes / info.json""" - attributes.update({"image_id": image_id}) - self._conn(context).make_request( - method='POST', - bucket='_images', - query_args=self._qs(attributes)) - return True - - def register(self, context, image_location): - """ rpc call to register a new image based from a manifest """ - image_id = utils.generate_uid('ami') - self._conn(context).make_request( - method='PUT', - bucket='_images', - query_args=self._qs({'image_location': image_location, - 'image_id': image_id})) - return image_id - - def _fix_image_id(self, images): - """S3 has imageId but OpenStack wants id""" - for image in images: - if 'imageId' in image: - image['id'] = image['imageId'] - return images + def create(self, context, metadata, data=None): + """metadata['properties'] should contain image_location""" + image = self._s3_create(context, metadata) + return image + + def delete(self, context, image_id): + # FIXME(vish): call to show is to check filter + self.show(context, image_id) + self.service.delete(context, image_id) + + def update(self, context, image_id, metadata, data=None): + # FIXME(vish): call to show is to check filter + self.show(context, image_id) + image = self.service.update(context, image_id, metadata, data) + return image def index(self, context): - """Return a list of all images that a user can see.""" - response = self._conn(context).make_request( - method='GET', - bucket='_images') - return self._fix_image_id(json.loads(response.read())) + images = self.service.index(context) + # FIXME(vish): index doesn't filter so we do it manually + return self._filter(context, images) + + def detail(self, context): + images = self.service.detail(context) + # FIXME(vish): detail doesn't filter so we do it manually + return self._filter(context, images) + + @classmethod + def _is_visible(cls, context, image): + return (context.is_admin + or context.project_id == image['properties']['owner_id'] + or image['properties']['is_public'] == 'True') + + @classmethod + def _filter(cls, context, images): + filtered = [] + for image in images: + if not cls._is_visible(context, image): + continue + filtered.append(image) + return filtered def show(self, context, image_id): - """return a image object if the context has permissions""" - if FLAGS.connection_type == 'fake': - return {'imageId': 'bar'} - result = self.index(context) - result = [i for i in result if i['imageId'] == image_id] - if not result: - raise exception.NotFound(_('Image %s could not be found') - % image_id) - image = result[0] + image = 
self.service.show(context, image_id)
+        if not self._is_visible(context, image):
+            raise exception.NotFound
         return image
 
-    def deregister(self, context, image_id):
-        """ unregister an image """
-        self._conn(context).make_request(
-                method='DELETE',
-                bucket='_images',
-                query_args=self._qs({'image_id': image_id}))
+    def show_by_name(self, context, name):
+        image = self.service.show_by_name(context, name)
+        if not self._is_visible(context, image):
+            raise exception.NotFound
+        return image
 
-    def _conn(self, context):
+    @staticmethod
+    def _conn(context):
+        # TODO(vish): is there a better way to get creds to sign
+        #             for the user?
         access = manager.AuthManager().get_access_key(context.user,
                                                       context.project)
         secret = str(context.user.secret)
@@ -110,8 +119,159 @@ class S3ImageService(service.BaseImageService):
                                       port=FLAGS.s3_port,
                                       host=FLAGS.s3_host)
 
-    def _qs(self, params):
-        pairs = []
-        for key in params.keys():
-            pairs.append(key + '=' + urllib.quote(params[key]))
-        return '&'.join(pairs)
+    @staticmethod
+    def _download_file(bucket, filename, local_dir):
+        key = bucket.get_key(filename)
+        local_filename = os.path.join(local_dir, filename)
+        key.get_contents_to_filename(local_filename)
+        return local_filename
+
+    def _s3_create(self, context, metadata):
+        """Gets a manifest from S3 and makes an image."""
+
+        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
+
+        image_location = metadata['properties']['image_location']
+        bucket_name = image_location.split("/")[0]
+        manifest_path = image_location[len(bucket_name) + 1:]
+        bucket = self._conn(context).get_bucket(bucket_name)
+        key = bucket.get_key(manifest_path)
+        manifest = key.get_contents_as_string()
+
+        manifest = ElementTree.fromstring(manifest)
+        image_format = 'ami'
+        image_type = 'machine'
+
+        try:
+            kernel_id = manifest.find("machine_configuration/kernel_id").text
+            if kernel_id == 'true':
+                image_format = 'aki'
+                image_type = 'kernel'
+                kernel_id = None
+        except Exception:
+            kernel_id = None
+
+        try:
+            ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
+            if ramdisk_id == 'true':
+                image_format = 'ari'
+                image_type = 'ramdisk'
+                ramdisk_id = None
+        except Exception:
+            ramdisk_id = None
+
+        try:
+            arch = manifest.find("machine_configuration/architecture").text
+        except Exception:
+            arch = 'x86_64'
+
+        properties = metadata['properties']
+        properties['owner_id'] = context.project_id
+        properties['architecture'] = arch
+
+        if kernel_id:
+            properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
+
+        if ramdisk_id:
+            properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
+
+        properties['is_public'] = False
+        properties['type'] = image_type
+        metadata.update({'disk_format': image_format,
+                         'container_format': image_format,
+                         'status': 'queued',
+                         'is_public': True,
+                         'properties': properties})
+        metadata['properties']['image_state'] = 'pending'
+        image = self.service.create(context, metadata)
+        image_id = image['id']
+
+        def delayed_create():
+            """This handles the fetching and decrypting of the part files."""
+            parts = []
+            for fn_element in manifest.find("image").getiterator("filename"):
+                part = self._download_file(bucket, fn_element.text, image_path)
+                parts.append(part)
+
+            # NOTE(vish): this may be suboptimal, should we use cat?
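# NOTE(editor): the block that follows answers the question above by
# concatenating the downloaded parts in python rather than shelling out to
# cat; the same join in isolation (a minimal sketch, file paths assumed):

import shutil


def _concatenate_parts(part_paths, combined_path):
    """Append each downloaded part to the combined file, in order."""
    with open(combined_path, 'w') as combined:
        for path in part_paths:
            with open(path) as part:
                shutil.copyfileobj(part, combined)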
+ encrypted_filename = os.path.join(image_path, 'image.encrypted') + with open(encrypted_filename, 'w') as combined: + for filename in parts: + with open(filename) as part: + shutil.copyfileobj(part, combined) + + metadata['properties']['image_state'] = 'decrypting' + self.service.update(context, image_id, metadata) + + hex_key = manifest.find("image/ec2_encrypted_key").text + encrypted_key = binascii.a2b_hex(hex_key) + hex_iv = manifest.find("image/ec2_encrypted_iv").text + encrypted_iv = binascii.a2b_hex(hex_iv) + + # FIXME(vish): grab key from common service so this can run on + # any host. + cloud_pk = os.path.join(FLAGS.ca_path, "private/cakey.pem") + + decrypted_filename = os.path.join(image_path, 'image.tar.gz') + self._decrypt_image(encrypted_filename, encrypted_key, + encrypted_iv, cloud_pk, decrypted_filename) + + metadata['properties']['image_state'] = 'untarring' + self.service.update(context, image_id, metadata) + + unz_filename = self._untarzip_image(image_path, decrypted_filename) + + metadata['properties']['image_state'] = 'uploading' + with open(unz_filename) as image_file: + self.service.update(context, image_id, metadata, image_file) + metadata['properties']['image_state'] = 'available' + self.service.update(context, image_id, metadata) + + shutil.rmtree(image_path) + + eventlet.spawn_n(delayed_create) + + return image + + @staticmethod + def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, + cloud_private_key, decrypted_filename): + key, err = utils.execute('openssl', + 'rsautl', + '-decrypt', + '-inkey', '%s' % cloud_private_key, + process_input=encrypted_key, + check_exit_code=False) + if err: + raise exception.Error(_("Failed to decrypt private key: %s") + % err) + iv, err = utils.execute('openssl', + 'rsautl', + '-decrypt', + '-inkey', '%s' % cloud_private_key, + process_input=encrypted_iv, + check_exit_code=False) + if err: + raise exception.Error(_("Failed to decrypt initialization " + "vector: %s") % err) + + _out, err = utils.execute('openssl', 'enc', + '-d', '-aes-128-cbc', + '-in', '%s' % (encrypted_filename,), + '-K', '%s' % (key,), + '-iv', '%s' % (iv,), + '-out', '%s' % (decrypted_filename,), + check_exit_code=False) + if err: + raise exception.Error(_("Failed to decrypt image file " + "%(image_file)s: %(err)s") % + {'image_file': encrypted_filename, + 'err': err}) + + @staticmethod + def _untarzip_image(path, filename): + tar_file = tarfile.open(filename, "r|gz") + tar_file.extractall(path) + image_file = tar_file.getnames()[0] + tar_file.close() + return os.path.join(path, image_file) diff --git a/nova/image/service.py b/nova/image/service.py index ebee2228d..c09052cab 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -56,9 +56,9 @@ class BaseImageService(object): """ raise NotImplementedError - def show(self, context, id): + def show(self, context, image_id): """ - Returns a dict containing image data for the given opaque image id. + Returns a dict containing image metadata for the given opaque image id. :retval a mapping with the following signature: @@ -76,17 +76,27 @@ class BaseImageService(object): """ raise NotImplementedError - def create(self, context, data): + def get(self, context, data): """ - Store the image data and return the new image id. + Returns a dict containing image metadata and writes image data to data. 
+ + :param data: a file-like object to hold binary image data + + :raises NotFound if the image does not exist + """ + raise NotImplementedError + + def create(self, context, metadata, data=None): + """ + Store the image metadata and data and return the new image id. :raises AlreadyExists if the image already exist. """ raise NotImplementedError - def update(self, context, image_id, data): - """Replace the contents of the given image with the new data. + def update(self, context, image_id, metadata, data=None): + """Update the given image with the new metadata and data. :raises NotFound if the image does not exist. diff --git a/nova/log.py b/nova/log.py index b541488bd..d194ab8f0 100644 --- a/nova/log.py +++ b/nova/log.py @@ -28,9 +28,11 @@ It also allows setting of formatting information through flags. import cStringIO +import inspect import json import logging import logging.handlers +import os import sys import traceback @@ -52,7 +54,7 @@ flags.DEFINE_string('logging_default_format_string', 'format string to use for log messages without context') flags.DEFINE_string('logging_debug_format_suffix', - 'from %(processName)s (pid=%(process)d) %(funcName)s' + 'from (pid=%(process)d) %(funcName)s' ' %(pathname)s:%(lineno)d', 'data to append to log format when level is DEBUG') @@ -63,6 +65,7 @@ flags.DEFINE_string('logging_exception_prefix', flags.DEFINE_list('default_log_levels', ['amqplib=WARN', 'sqlalchemy=WARN', + 'boto=WARN', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') @@ -92,7 +95,7 @@ critical = logging.critical log = logging.log # handlers StreamHandler = logging.StreamHandler -FileHandler = logging.FileHandler +WatchedFileHandler = logging.handlers.WatchedFileHandler # logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. SysLogHandler = logging.handlers.SysLogHandler @@ -111,22 +114,16 @@ def _dictify_context(context): return context -def basicConfig(): - logging.basicConfig() - for handler in logging.root.handlers: - handler.setFormatter(_formatter) - if FLAGS.verbose: - logging.root.setLevel(logging.DEBUG) - else: - logging.root.setLevel(logging.INFO) - if FLAGS.use_syslog: - syslog = SysLogHandler(address='/dev/log') - syslog.setFormatter(_formatter) - logging.root.addHandler(syslog) +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): if FLAGS.logfile: - logfile = FileHandler(FLAGS.logfile) - logfile.setFormatter(_formatter) - logging.root.addHandler(logfile) + return FLAGS.logfile + if FLAGS.logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(FLAGS.logdir, binary),) class NovaLogger(logging.Logger): @@ -136,23 +133,19 @@ class NovaLogger(logging.Logger): This becomes the class that is instanciated by logging.getLogger. 
""" def __init__(self, name, level=NOTSET): - level_name = self._get_level_from_flags(name, FLAGS) - level = globals()[level_name] logging.Logger.__init__(self, name, level) + self.setup_from_flags() - def _get_level_from_flags(self, name, FLAGS): - # if exactly "nova", or a child logger, honor the verbose flag - if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose: - return 'DEBUG' + def setup_from_flags(self): + """Setup logger from flags""" + level = NOTSET for pair in FLAGS.default_log_levels: - logger, _sep, level = pair.partition('=') + logger, _sep, level_name = pair.partition('=') # NOTE(todd): if we set a.b, we want a.b.c to have the same level # (but not a.bc, so we check the dot) - if name == logger: - return level - if name.startswith(logger) and name[len(logger)] == '.': - return level - return 'INFO' + if self.name == logger or self.name.startswith("%s." % logger): + level = globals()[level_name] + self.setLevel(level) def _log(self, level, msg, args, exc_info=None, extra=None, context=None): """Extract context from any log call""" @@ -161,12 +154,12 @@ class NovaLogger(logging.Logger): if context: extra.update(_dictify_context(context)) extra.update({"nova_version": version.version_string_with_vcs()}) - logging.Logger._log(self, level, msg, args, exc_info, extra) + return logging.Logger._log(self, level, msg, args, exc_info, extra) def addHandler(self, handler): """Each handler gets our custom formatter""" handler.setFormatter(_formatter) - logging.Logger.addHandler(self, handler) + return logging.Logger.addHandler(self, handler) def audit(self, msg, *args, **kwargs): """Shortcut for our AUDIT level""" @@ -193,23 +186,6 @@ class NovaLogger(logging.Logger): self.error(message, **kwargs) -def handle_exception(type, value, tb): - logging.root.critical(str(value), exc_info=(type, value, tb)) - - -sys.excepthook = handle_exception -logging.setLoggerClass(NovaLogger) - - -class NovaRootLogger(NovaLogger): - pass - -if not isinstance(logging.root, NovaRootLogger): - logging.root = NovaRootLogger("nova.root", WARNING) - NovaLogger.root = logging.root - NovaLogger.manager.root = logging.root - - class NovaFormatter(logging.Formatter): """ A nova.context.RequestContext aware formatter configured through flags. @@ -256,8 +232,76 @@ class NovaFormatter(logging.Formatter): _formatter = NovaFormatter() +class NovaRootLogger(NovaLogger): + def __init__(self, name, level=NOTSET): + self.logpath = None + self.filelog = None + self.streamlog = StreamHandler() + self.syslog = None + NovaLogger.__init__(self, name, level) + + def setup_from_flags(self): + """Setup logger from flags""" + global _filelog + if FLAGS.use_syslog: + self.syslog = SysLogHandler(address='/dev/log') + self.addHandler(self.syslog) + elif self.syslog: + self.removeHandler(self.syslog) + logpath = _get_log_file_path() + if logpath: + self.removeHandler(self.streamlog) + if logpath != self.logpath: + self.removeHandler(self.filelog) + self.filelog = WatchedFileHandler(logpath) + self.addHandler(self.filelog) + self.logpath = logpath + else: + self.removeHandler(self.filelog) + self.addHandler(self.streamlog) + if FLAGS.verbose: + self.setLevel(DEBUG) + else: + self.setLevel(INFO) + + +def handle_exception(type, value, tb): + extra = {} + if FLAGS.verbose: + extra['exc_info'] = (type, value, tb) + logging.root.critical(str(value), **extra) + + +def reset(): + """Resets logging handlers. 
Should be called if FLAGS changes.""" + for logger in NovaLogger.manager.loggerDict.itervalues(): + if isinstance(logger, NovaLogger): + logger.setup_from_flags() + + +def setup(): + """Setup nova logging.""" + if not isinstance(logging.root, NovaRootLogger): + logging._acquireLock() + for handler in logging.root.handlers: + logging.root.removeHandler(handler) + logging.root = NovaRootLogger("nova") + NovaLogger.root = logging.root + NovaLogger.manager.root = logging.root + for logger in NovaLogger.manager.loggerDict.itervalues(): + logger.root = logging.root + if isinstance(logger, logging.Logger): + NovaLogger.manager._fixupParents(logger) + NovaLogger.manager.loggerDict["nova"] = logging.root + logging._releaseLock() + sys.excepthook = handle_exception + reset() + + +root = logging.root +logging.setLoggerClass(NovaLogger) + + def audit(msg, *args, **kwargs): """Shortcut for logging to root log with sevrity 'AUDIT'.""" - if len(logging.root.handlers) == 0: - basicConfig() logging.root.log(AUDIT, msg, *args, **kwargs) diff --git a/nova/network/api.py b/nova/network/api.py index bf43acb51..4ee1148cb 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -21,6 +21,7 @@ Handles all requests relating to instances (guest vms). """ from nova import db +from nova import exception from nova import flags from nova import log as logging from nova import quota diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index de0e488ae..9f9d282b6 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -17,14 +17,17 @@ Implements vlans, bridges, and iptables rules using linux utilities. """ +import inspect import os +from eventlet import semaphore + from nova import db +from nova import exception from nova import flags from nova import log as logging from nova import utils - LOG = logging.getLogger("nova.linux_net") @@ -43,7 +46,7 @@ flags.DEFINE_string('dhcp_domain', flags.DEFINE_string('networks_path', '$state_path/networks', 'Location to keep network config files') -flags.DEFINE_string('public_interface', 'vlan1', +flags.DEFINE_string('public_interface', 'eth0', 'Interface for public IP addresses') flags.DEFINE_string('vlan_interface', 'eth0', 'network device for vlans') @@ -51,8 +54,8 @@ flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') flags.DEFINE_string('routing_source_ip', '$my_ip', 'Public IP of network host') -flags.DEFINE_bool('use_nova_chains', False, - 'use the nova_ routing chains instead of default') +flags.DEFINE_string('input_chain', 'INPUT', + 'chain to add nova_input to') flags.DEFINE_string('dns_server', None, 'if set, uses specific dns server for dnsmasq') @@ -60,111 +63,379 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', 'dmz range that should be accepted') +binary_name = os.path.basename(inspect.stack()[-1][1]) + + +class IptablesRule(object): + """An iptables rule + + You shouldn't need to use this class directly, it's only used by + IptablesManager + """ + def __init__(self, chain, rule, wrap=True, top=False): + self.chain = chain + self.rule = rule + self.wrap = wrap + self.top = top + + def __eq__(self, other): + return ((self.chain == other.chain) and + (self.rule == other.rule) and + (self.top == other.top) and + (self.wrap == other.wrap)) + + def __ne__(self, other): + return not self == other + + def __str__(self): + if self.wrap: + chain = '%s-%s' % (binary_name, self.chain) + else: + chain = self.chain + return '-A %s %s' % (chain, self.rule) + + +class IptablesTable(object): + """An 
iptables table"""
+
+    def __init__(self):
+        self.rules = []
+        self.chains = set()
+        self.unwrapped_chains = set()
+
+    def add_chain(self, name, wrap=True):
+        """Adds a named chain to the table
+
+        The chain name is wrapped to be unique for the component creating
+        it, so different components of Nova can safely create identically
+        named chains without interfering with one another.
+
+        At the moment, its wrapped name is <binary name>-<chain name>,
+        so if nova-compute creates a chain named "OUTPUT", it'll actually
+        end up named "nova-compute-OUTPUT".
+        """
+        if wrap:
+            self.chains.add(name)
+        else:
+            self.unwrapped_chains.add(name)
+
+    def remove_chain(self, name, wrap=True):
+        """Remove named chain
+
+        This removal "cascades". All rules in the chain are removed, as are
+        all rules in other chains that jump to it.
+
+        If the chain is not found, this is merely logged.
+        """
+        if wrap:
+            chain_set = self.chains
+        else:
+            chain_set = self.unwrapped_chains
+
+        if name not in chain_set:
+            LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
+                      name)
+            return
+
+        chain_set.remove(name)
+        self.rules = filter(lambda r: r.chain != name, self.rules)
+
+        if wrap:
+            jump_snippet = '-j %s-%s' % (binary_name, name)
+        else:
+            jump_snippet = '-j %s' % (name,)
+
+        self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
+
+    def add_rule(self, chain, rule, wrap=True, top=False):
+        """Add a rule to the table
+
+        This is just like what you'd feed to iptables, just without
+        the "-A <chain name>" bit at the start.
+
+        However, if you need to jump to one of your wrapped chains,
+        prepend its name with a '$' which will ensure the wrapping
+        is applied correctly.
+        """
+        if wrap and chain not in self.chains:
+            raise ValueError(_("Unknown chain: %r") % chain)
+
+        if '$' in rule:
+            rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
+
+        self.rules.append(IptablesRule(chain, rule, wrap, top))
+
+    def _wrap_target_chain(self, s):
+        if s.startswith('$'):
+            return '%s-%s' % (binary_name, s[1:])
+        return s
+
+    def remove_rule(self, chain, rule, wrap=True, top=False):
+        """Remove a rule from a chain
+
+        Note: The rule must be exactly identical to the one that was added.
+        You cannot switch arguments around like you can with the iptables
+        CLI tool.
+        """
+        try:
+            self.rules.remove(IptablesRule(chain, rule, wrap, top))
+        except ValueError:
+            LOG.debug(_("Tried to remove rule that wasn't there:"
+                        " %(chain)r %(rule)r %(wrap)r %(top)r"),
+                      {'chain': chain, 'rule': rule,
+                       'top': top, 'wrap': wrap})
+
+
+class IptablesManager(object):
+    """Wrapper for iptables
+
+    See IptablesTable for some usage docs
+
+    A number of chains are set up to begin with.
+
+    First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
+    name is not wrapped, so it's shared between the various nova workers. It's
+    intended for rules that need to live at the top of the FORWARD and OUTPUT
+    chains. It's in both the ipv4 and ipv6 set of tables.
+
+    For ipv4 and ipv6, the builtin INPUT, OUTPUT, and FORWARD filter chains are
+    wrapped, meaning that the "real" INPUT chain has a rule that jumps to the
+    wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
+    "local" which is jumped to from nova-filter-top.
+
+    For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
+    wrapped in the same way as the builtin filter chains. Additionally, there's
+    a snat chain that is applied after the POSTROUTING chain.
+ """ + def __init__(self, execute=None): + if not execute: + if FLAGS.fake_network: + self.execute = lambda *args, **kwargs: ('', '') + else: + self.execute = utils.execute + else: + self.execute = execute + + self.ipv4 = {'filter': IptablesTable(), + 'nat': IptablesTable()} + self.ipv6 = {'filter': IptablesTable()} + + # Add a nova-filter-top chain. It's intended to be shared + # among the various nova components. It sits at the very top + # of FORWARD and OUTPUT. + for tables in [self.ipv4, self.ipv6]: + tables['filter'].add_chain('nova-filter-top', wrap=False) + tables['filter'].add_rule('FORWARD', '-j nova-filter-top', + wrap=False, top=True) + tables['filter'].add_rule('OUTPUT', '-j nova-filter-top', + wrap=False, top=True) + + tables['filter'].add_chain('local') + tables['filter'].add_rule('nova-filter-top', '-j $local', + wrap=False) + + # Wrap the builtin chains + builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'], + 'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']}, + 6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}} + + for ip_version in builtin_chains: + if ip_version == 4: + tables = self.ipv4 + elif ip_version == 6: + tables = self.ipv6 + + for table, chains in builtin_chains[ip_version].iteritems(): + for chain in chains: + tables[table].add_chain(chain) + tables[table].add_rule(chain, '-j $%s' % (chain,), + wrap=False) + + # Add a nova-postrouting-bottom chain. It's intended to be shared + # among the various nova components. We set it as the last chain + # of POSTROUTING chain. + self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False) + self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom', + wrap=False) + + # We add a snat chain to the shared nova-postrouting-bottom chain + # so that it's applied last. + self.ipv4['nat'].add_chain('snat') + self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat', + wrap=False) + + # And then we add a floating-snat chain and jump to first thing in + # the snat chain. + self.ipv4['nat'].add_chain('floating-snat') + self.ipv4['nat'].add_rule('snat', '-j $floating-snat') + + self.semaphore = semaphore.Semaphore() + + @utils.synchronized('iptables') + def apply(self): + """Apply the current in-memory set of iptables rules + + This will blow away any rules left over from previous runs of the + same component of Nova, and replace them with our current set of + rules. This happens atomically, thanks to iptables-restore. + + We wrap the call in a semaphore lock, so that we don't race with + ourselves. In the event of a race with another component running + an iptables-* command at the same time, we retry up to 5 times. 
+ """ + with self.semaphore: + s = [('iptables', self.ipv4)] + if FLAGS.use_ipv6: + s += [('ip6tables', self.ipv6)] + + for cmd, tables in s: + for table in tables: + current_table, _ = self.execute('sudo', + '%s-save' % (cmd,), + '-t', '%s' % (table,), + attempts=5) + current_lines = current_table.split('\n') + new_filter = self._modify_rules(current_lines, + tables[table]) + self.execute('sudo', '%s-restore' % (cmd,), + process_input='\n'.join(new_filter), + attempts=5) + + def _modify_rules(self, current_lines, table, binary=None): + unwrapped_chains = table.unwrapped_chains + chains = table.chains + rules = table.rules + + # Remove any trace of our rules + new_filter = filter(lambda line: binary_name not in line, + current_lines) + + seen_chains = False + rules_index = 0 + for rules_index, rule in enumerate(new_filter): + if not seen_chains: + if rule.startswith(':'): + seen_chains = True + else: + if not rule.startswith(':'): + break + + our_rules = [] + for rule in rules: + rule_str = str(rule) + if rule.top: + # rule.top == True means we want this rule to be at the top. + # Further down, we weed out duplicates from the bottom of the + # list, so here we remove the dupes ahead of time. + new_filter = filter(lambda s: s.strip() != rule_str.strip(), + new_filter) + our_rules += [rule_str] + + new_filter[rules_index:rules_index] = our_rules + + new_filter[rules_index:rules_index] = [':%s - [0:0]' % \ + (name,) \ + for name in unwrapped_chains] + new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' % \ + (binary_name, name,) \ + for name in chains] + + seen_lines = set() + + def _weed_out_duplicates(line): + line = line.strip() + if line in seen_lines: + return False + else: + seen_lines.add(line) + return True + + # We filter duplicates, letting the *last* occurrence take + # precendence. 
+ new_filter.reverse() + new_filter = filter(_weed_out_duplicates, new_filter) + new_filter.reverse() + return new_filter + + +iptables_manager = IptablesManager() + + def metadata_forward(): """Create forwarding rule for metadata""" - _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " - "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) + iptables_manager.ipv4['nat'].add_rule("PREROUTING", + "-s 0.0.0.0/0 -d 169.254.169.254/32 " + "-p tcp -m tcp --dport 80 -j DNAT " + "--to-destination %s:%s" % \ + (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) + iptables_manager.apply() def init_host(): """Basic networking setup goes here""" - - if FLAGS.use_nova_chains: - _execute("sudo iptables -N nova_input", check_exit_code=False) - _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain, - check_exit_code=False) - _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain) - - _execute("sudo iptables -N nova_forward", check_exit_code=False) - _execute("sudo iptables -D FORWARD -j nova_forward", - check_exit_code=False) - _execute("sudo iptables -A FORWARD -j nova_forward") - - _execute("sudo iptables -N nova_output", check_exit_code=False) - _execute("sudo iptables -D OUTPUT -j nova_output", - check_exit_code=False) - _execute("sudo iptables -A OUTPUT -j nova_output") - - _execute("sudo iptables -t nat -N nova_prerouting", - check_exit_code=False) - _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting", - check_exit_code=False) - _execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting") - - _execute("sudo iptables -t nat -N nova_postrouting", - check_exit_code=False) - _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting", - check_exit_code=False) - _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting") - - _execute("sudo iptables -t nat -N nova_snatting", - check_exit_code=False) - _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting", - check_exit_code=False) - _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting") - - _execute("sudo iptables -t nat -N nova_output", check_exit_code=False) - _execute("sudo iptables -t nat -D OUTPUT -j nova_output", - check_exit_code=False) - _execute("sudo iptables -t nat -A OUTPUT -j nova_output") - else: - # NOTE(vish): This makes it easy to ensure snatting rules always - # come after the accept rules in the postrouting chain - _execute("sudo iptables -t nat -N SNATTING", - check_exit_code=False) - _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING", - check_exit_code=False) - _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING") - # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. 
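# NOTE(editor): IptablesRule.__str__ (defined above) is what turns the
# in-memory rules added below into iptables-restore lines; e.g., for some
# illustrative addresses (binary_name is derived from the running process):

rule = IptablesRule('snat', '-s 10.0.0.0/8 -j SNAT --to-source 1.2.3.4')
assert str(rule) == ('-A %s-snat -s 10.0.0.0/8 '
                     '-j SNAT --to-source 1.2.3.4' % binary_name)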
- _confirm_rule("SNATTING", "-t nat -s %s " - "-j SNAT --to-source %s" - % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True) + iptables_manager.ipv4['nat'].add_rule("snat", + "-s %s -j SNAT --to-source %s" % \ + (FLAGS.fixed_range, + FLAGS.routing_source_ip)) + + iptables_manager.ipv4['nat'].add_rule("POSTROUTING", + "-s %s -d %s -j ACCEPT" % \ + (FLAGS.fixed_range, FLAGS.dmz_cidr)) - _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" % - (FLAGS.fixed_range, FLAGS.dmz_cidr)) - _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" % - {'range': FLAGS.fixed_range}) + iptables_manager.ipv4['nat'].add_rule("POSTROUTING", + "-s %(range)s -d %(range)s " + "-j ACCEPT" % \ + {'range': FLAGS.fixed_range}) + iptables_manager.apply() def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface""" - _execute("sudo ip addr add %s dev %s" % (floating_ip, - FLAGS.public_interface), + _execute('sudo', 'ip', 'addr', 'add', floating_ip, + 'dev', FLAGS.public_interface, check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): """Unbind a public ip from public interface""" - _execute("sudo ip addr del %s dev %s" % (floating_ip, - FLAGS.public_interface)) + _execute('sudo', 'ip', 'addr', 'del', floating_ip, + 'dev', FLAGS.public_interface) def ensure_vlan_forward(public_ip, port, private_ip): """Sets up forwarding rules for vlan""" - _confirm_rule("FORWARD", "-d %s -p udp --dport 1194 -j ACCEPT" % - private_ip) - _confirm_rule("PREROUTING", - "-t nat -d %s -p udp --dport %s -j DNAT --to %s:1194" - % (public_ip, port, private_ip)) + iptables_manager.ipv4['filter'].add_rule("FORWARD", + "-d %s -p udp " + "--dport 1194 " + "-j ACCEPT" % private_ip) + iptables_manager.ipv4['nat'].add_rule("PREROUTING", + "-d %s -p udp " + "--dport %s -j DNAT --to %s:1194" % + (public_ip, port, private_ip)) + iptables_manager.apply() def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule""" - _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" - % (floating_ip, fixed_ip)) - _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" - % (fixed_ip, floating_ip)) + for chain, rule in floating_forward_rules(floating_ip, fixed_ip): + iptables_manager.ipv4['nat'].add_rule(chain, rule) + iptables_manager.apply() def remove_floating_forward(floating_ip, fixed_ip): """Remove forwarding for floating ip""" - _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s" - % (floating_ip, fixed_ip)) - _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s" - % (fixed_ip, floating_ip)) + for chain, rule in floating_forward_rules(floating_ip, fixed_ip): + iptables_manager.ipv4['nat'].remove_rule(chain, rule) + iptables_manager.apply() + + +def floating_forward_rules(floating_ip, fixed_ip): + return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), + ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), + ("floating-snat", + "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))] def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): @@ -178,47 +449,90 @@ def ensure_vlan(vlan_num): interface = "vlan%s" % vlan_num if not _device_exists(interface): LOG.debug(_("Starting VLAN inteface %s"), interface) - _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") - _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) - _execute("sudo ifconfig %s up" % interface) + _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') + _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, 
                 vlan_num)
+        _execute('sudo', 'ip', 'link', 'set', interface, 'up')
     return interface
 
 
 def ensure_bridge(bridge, interface, net_attrs=None):
-    """Create a bridge unless it already exists"""
+    """Create a bridge unless it already exists.
+
+    :param interface: the interface to create the bridge on.
+    :param net_attrs: dictionary with attributes used to create the bridge.
+
+    If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
+    using net_attrs['broadcast'] and net_attrs['cidr'].  It will also add
+    the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+    The code will attempt to move any ips that already exist on the interface
+    onto the bridge and reset the default gateway if necessary.
+    """
     if not _device_exists(bridge):
         LOG.debug(_("Starting Bridge interface for %s"), interface)
-        _execute("sudo brctl addbr %s" % bridge)
-        _execute("sudo brctl setfd %s 0" % bridge)
+        _execute('sudo', 'brctl', 'addbr', bridge)
+        _execute('sudo', 'brctl', 'setfd', bridge, 0)
         # _execute("sudo brctl setageing %s 10" % bridge)
-        _execute("sudo brctl stp %s off" % bridge)
-        if interface:
-            _execute("sudo brctl addif %s %s" % (bridge, interface))
+        _execute('sudo', 'brctl', 'stp', bridge, 'off')
+        _execute('sudo', 'ip', 'link', 'set', bridge, 'up')
     if net_attrs:
-        _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
-                 (bridge,
-                  net_attrs['gateway'],
-                  net_attrs['broadcast'],
-                  net_attrs['netmask']))
+        # NOTE(vish): The ip for dnsmasq has to be the first address on the
+        #             bridge for it to respond to requests properly
+        suffix = net_attrs['cidr'].rpartition('/')[2]
+        out, err = _execute('sudo', 'ip', 'addr', 'add',
+                            "%s/%s" %
+                            (net_attrs['gateway'], suffix),
+                            'brd',
+                            net_attrs['broadcast'],
+                            'dev',
+                            bridge,
+                            check_exit_code=False)
+        if err and err != "RTNETLINK answers: File exists\n":
+            raise exception.Error("Failed to add ip: %s" % err)
         if(FLAGS.use_ipv6):
-            _execute("sudo ip -f inet6 addr change %s dev %s" %
-                     (net_attrs['cidr_v6'], bridge))
-        _execute("sudo ifconfig %s up" % bridge)
-    else:
-        _execute("sudo ifconfig %s up" % bridge)
-    if FLAGS.use_nova_chains:
-        (out, err) = _execute("sudo iptables -N nova_forward",
-                              check_exit_code=False)
-        if err != 'iptables: Chain already exists.\n':
-            # NOTE(vish): chain didn't exist link chain
-            _execute("sudo iptables -D FORWARD -j nova_forward",
-                     check_exit_code=False)
-            _execute("sudo iptables -A FORWARD -j nova_forward")
-
-    _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
-    _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
-    _execute("sudo iptables -N nova-local", check_exit_code=False)
-    _confirm_rule("FORWARD", "-j nova-local")
+            _execute('sudo', 'ip', '-f', 'inet6', 'addr',
+                     'change', net_attrs['cidr_v6'],
+                     'dev', bridge)
+        # NOTE(vish): If the public interface is the same as the
+        #             bridge, then the bridge has to be in promiscuous
+        #             mode to forward packets properly.
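# NOTE(editor): the interface-enslaving code that follows (after the
# promiscuous-mode check) scrapes `route -n` for a default route bound to
# the interface; that parse in isolation (the sample output line is made up):

def _default_gateway(route_n_output, interface):
    """Return the gateway of the 0.0.0.0 route bound to interface."""
    for line in route_n_output.split('\n'):
        fields = line.split()
        if fields and fields[0] == '0.0.0.0' and fields[-1] == interface:
            return fields[1]
    return None

# _default_gateway('0.0.0.0 192.168.0.1 0.0.0.0 UG 100 0 0 eth0', 'eth0')
# returns '192.168.0.1'.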
+ if(FLAGS.public_interface == bridge): + _execute('sudo', 'ip', 'link', 'set', + 'dev', bridge, 'promisc', 'on') + if interface: + # NOTE(vish): This will break if there is already an ip on the + # interface, so we move any ips to the bridge + gateway = None + out, err = _execute('sudo', 'route', '-n') + for line in out.split("\n"): + fields = line.split() + if fields and fields[0] == "0.0.0.0" and fields[-1] == interface: + gateway = fields[1] + out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, + 'scope', 'global') + for line in out.split("\n"): + fields = line.split() + if fields and fields[0] == "inet": + params = ' '.join(fields[1:-1]) + _execute('sudo', 'ip', 'addr', + 'del', params, 'dev', fields[-1]) + _execute('sudo', 'ip', 'addr', + 'add', params, 'dev', bridge) + if gateway: + _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway) + out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, + check_exit_code=False) + + if (err and err != "device %s is already a member of a bridge; can't " + "enslave it to bridge %s.\n" % (interface, bridge)): + raise exception.Error("Failed to add interface: %s" % err) + + iptables_manager.ipv4['filter'].add_rule("FORWARD", + "--in-interface %s -j ACCEPT" % \ + bridge) + iptables_manager.ipv4['filter'].add_rule("FORWARD", + "--out-interface %s -j ACCEPT" % \ + bridge) def get_dhcp_hosts(context, network_id): @@ -252,11 +566,11 @@ def update_dhcp(context, network_id): # if dnsmasq is already running, then tell it to reload if pid: - out, _err = _execute('cat /proc/%d/cmdline' % pid, + out, _err = _execute('cat', "/proc/%d/cmdline" % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo kill -HUP %d' % pid) + _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("Hupping dnsmasq threw %s"), exc) @@ -267,7 +581,7 @@ def update_dhcp(context, network_id): env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network_ref['bridge']} command = _dnsmasq_cmd(network_ref) - _execute(command, addl_env=env) + _execute(*command, addl_env=env) def update_ra(context, network_id): @@ -297,17 +611,17 @@ interface %s # if radvd is already running, then tell it to reload if pid: - out, _err = _execute('cat /proc/%d/cmdline' + out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo kill %d' % pid) + _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("killing radvd threw %s"), exc) else: LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) command = _ra_cmd(network_ref) - _execute(command) + _execute(*command) db.network_update(context, network_id, {"ra_server": utils.get_my_linklocal(network_ref['bridge'])}) @@ -322,67 +636,48 @@ def _host_dhcp(fixed_ip_ref): fixed_ip_ref['address']) -def _execute(cmd, *args, **kwargs): +def _execute(*cmd, **kwargs): """Wrapper around utils._execute for fake_network""" if FLAGS.fake_network: - LOG.debug("FAKE NET: %s", cmd) + LOG.debug("FAKE NET: %s", " ".join(map(str, cmd))) return "fake", 0 else: - return utils.execute(cmd, *args, **kwargs) + return utils.execute(*cmd, **kwargs) def _device_exists(device): """Check if ethernet device exists""" - (_out, err) = _execute("ifconfig %s" % device, check_exit_code=False) + (_out, err) = _execute('ip', 'link', 'show', 'dev', device, + check_exit_code=False) return not err -def _confirm_rule(chain, cmd, append=False): - """Delete and re-add iptables rule""" - if 
FLAGS.use_nova_chains: - chain = "nova_%s" % chain.lower() - if append: - loc = "-A" - else: - loc = "-I" - _execute("sudo iptables --delete %s %s" % (chain, cmd), - check_exit_code=False) - _execute("sudo iptables %s %s %s" % (loc, chain, cmd)) - - -def _remove_rule(chain, cmd): - """Remove iptables rule""" - if FLAGS.use_nova_chains: - chain = "%s" % chain.lower() - _execute("sudo iptables --delete %s %s" % (chain, cmd)) - - def _dnsmasq_cmd(net): """Builds dnsmasq command""" - cmd = ['sudo -E dnsmasq', - ' --strict-order', - ' --bind-interfaces', - ' --conf-file=', - ' --domain=%s' % FLAGS.dhcp_domain, - ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), - ' --listen-address=%s' % net['gateway'], - ' --except-interface=lo', - ' --dhcp-range=%s,static,120s' % net['dhcp_start'], - ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), - ' --dhcp-script=%s' % FLAGS.dhcpbridge, - ' --leasefile-ro'] + cmd = ['sudo', '-E', 'dnsmasq', + '--strict-order', + '--bind-interfaces', + '--conf-file=', + '--domain=%s' % FLAGS.dhcp_domain, + '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), + '--listen-address=%s' % net['gateway'], + '--except-interface=lo', + '--dhcp-range=%s,static,120s' % net['dhcp_start'], + '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), + '--dhcp-script=%s' % FLAGS.dhcpbridge, + '--leasefile-ro'] if FLAGS.dns_server: - cmd.append(' -h -R --server=%s' % FLAGS.dns_server) - return ''.join(cmd) + cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server] + return cmd def _ra_cmd(net): """Builds radvd command""" - cmd = ['sudo -E radvd', -# ' -u nobody', - ' -C %s' % _ra_file(net['bridge'], 'conf'), - ' -p %s' % _ra_file(net['bridge'], 'pid')] - return ''.join(cmd) + cmd = ['sudo', '-E', 'radvd', +# '-u', 'nobody', + '-C', '%s' % _ra_file(net['bridge'], 'conf'), + '-p', '%s' % _ra_file(net['bridge'], 'pid')] + return cmd def _stop_dnsmasq(network): @@ -391,7 +686,7 @@ def _stop_dnsmasq(network): if pid: try: - _execute('sudo kill -TERM %d' % pid) + _execute('sudo', 'kill', '-TERM', pid) except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("Killing dnsmasq threw %s"), exc) diff --git a/nova/network/manager.py b/nova/network/manager.py index fbcbea131..3dfc48934 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -110,6 +110,7 @@ class NetworkManager(manager.Manager): This class must be subclassed to support specific topologies. """ + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): if not network_driver: @@ -118,6 +119,10 @@ class NetworkManager(manager.Manager): super(NetworkManager, self).__init__(*args, **kwargs) def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. + """ + self.driver.init_host() # Set up networking for the projects for which we're already # the designated network host. 
        ctxt = context.get_admin_context()
@@ -134,6 +139,19 @@ class NetworkManager(manager.Manager):
             self.driver.ensure_floating_forward(floating_ip['address'],
                                                 fixed_address)
 
+    def periodic_tasks(self, context=None):
+        """Tasks to be run at a periodic interval."""
+        super(NetworkManager, self).periodic_tasks(context)
+        if self.timeout_fixed_ips:
+            now = utils.utcnow()
+            timeout = FLAGS.fixed_ip_disassociate_timeout
+            time = now - datetime.timedelta(seconds=timeout)
+            num = self.db.fixed_ip_disassociate_all_by_timeout(context,
+                                                               self.host,
+                                                               time)
+            if num:
+                LOG.debug(_("Disassociated %s stale fixed ip(s)"), num)
+
     def set_network_host(self, context, network_id):
         """Safely sets the host of the network."""
         LOG.debug(_("setting network host"), context=context)
@@ -145,11 +163,22 @@ class NetworkManager(manager.Manager):
 
     def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
         """Gets a fixed ip from the pool."""
-        raise NotImplementedError()
+        # TODO(vish): when this is called by compute, we can associate compute
+        #             with a network, or a cluster of computes with a network
+        #             and use that network here with a method like
+        #             network_get_by_compute_host
+        network_ref = self.db.network_get_by_bridge(context,
+                                                    FLAGS.flat_network_bridge)
+        address = self.db.fixed_ip_associate_pool(context.elevated(),
+                                                  network_ref['id'],
+                                                  instance_id)
+        self.db.fixed_ip_update(context, address, {'allocated': True})
+        return address
 
     def deallocate_fixed_ip(self, context, address, *args, **kwargs):
         """Returns a fixed ip to the pool."""
-        raise NotImplementedError()
+        self.db.fixed_ip_update(context, address, {'allocated': False})
+        self.db.fixed_ip_disassociate(context.elevated(), address)
 
     def setup_fixed_ip(self, context, address):
         """Sets up rules for fixed ip."""
@@ -239,12 +268,58 @@ class NetworkManager(manager.Manager):
 
     def get_network_host(self, context):
         """Get the network host for the current context."""
-        raise NotImplementedError()
+        network_ref = self.db.network_get_by_bridge(context,
+                                                    FLAGS.flat_network_bridge)
+        # NOTE(vish): If the network has no host, use the network_host flag.
+        #             This could eventually be a db lookup of some sort, but
+        #             a flag is easy to handle for now.
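# NOTE(editor): condensed, the logic below reads "use the host recorded on
# the network; if nobody owns it yet, elect one over rpc" (the message shape
# is copied from the code that follows; claim_over_rpc is a hypothetical
# stand-in for the real rpc.call):

def _get_or_claim_host(network_ref, claim_over_rpc):
    host = network_ref['host']
    if not host:
        host = claim_over_rpc({"method": "set_network_host",
                               "args": {"network_id": network_ref['id']}})
    return host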
+ host = network_ref['host'] + if not host: + topic = self.db.queue_get_for(context, + FLAGS.network_topic, + FLAGS.network_host) + if FLAGS.fake_call: + return self.set_network_host(context, network_ref['id']) + host = rpc.call(context, + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) + return host def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, *args, **kwargs): + cidr_v6, label, *args, **kwargs): """Create networks based on parameters.""" - raise NotImplementedError() + fixed_net = IPy.IP(cidr) + fixed_net_v6 = IPy.IP(cidr_v6) + significant_bits_v6 = 64 + count = 1 + for index in range(num_networks): + start = index * network_size + significant_bits = 32 - int(math.log(network_size, 2)) + cidr = "%s/%s" % (fixed_net[start], significant_bits) + project_net = IPy.IP(cidr) + net = {} + net['bridge'] = FLAGS.flat_network_bridge + net['dns'] = FLAGS.flat_network_dns + net['cidr'] = cidr + net['netmask'] = str(project_net.netmask()) + net['gateway'] = str(project_net[1]) + net['broadcast'] = str(project_net.broadcast()) + net['dhcp_start'] = str(project_net[2]) + if num_networks > 1: + net['label'] = "%s_%d" % (label, count) + else: + net['label'] = label + count += 1 + + if(FLAGS.use_ipv6): + cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6) + net['cidr_v6'] = cidr_v6 + + network_ref = self.db.network_create_safe(context, net) + + if network_ref: + self._create_fixed_ips(context, network_ref['id']) @property def _bottom_reserved_ips(self): # pylint: disable-msg=R0201 @@ -302,78 +377,22 @@ class FlatManager(NetworkManager): not do any setup in this mode, it must be done manually. Requests to 169.254.169.254 port 80 will need to be forwarded to the api server. """ + timeout_fixed_ips = False - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): - """Gets a fixed ip from the pool.""" - # TODO(vish): when this is called by compute, we can associate compute - # with a network, or a cluster of computes with a network - # and use that network here with a method like - # network_get_by_compute_host - network_ref = self.db.network_get_by_bridge(context, - FLAGS.flat_network_bridge) - address = self.db.fixed_ip_associate_pool(context.elevated(), - network_ref['id'], - instance_id) - self.db.fixed_ip_update(context, address, {'allocated': True}) - return address - - def deallocate_fixed_ip(self, context, address, *args, **kwargs): - """Returns a fixed ip to the pool.""" - self.db.fixed_ip_update(context, address, {'allocated': False}) - self.db.fixed_ip_disassociate(context.elevated(), address) + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. 
+ """ + #Fix for bug 723298 - do not call init_host on superclass + #Following code has been copied for NetworkManager.init_host + ctxt = context.get_admin_context() + for network in self.db.host_get_networks(ctxt, self.host): + self._on_set_network_host(ctxt, network['id']) def setup_compute_network(self, context, instance_id): """Network is created manually.""" pass - def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, *args, **kwargs): - """Create networks based on parameters.""" - fixed_net = IPy.IP(cidr) - fixed_net_v6 = IPy.IP(cidr_v6) - significant_bits_v6 = 64 - for index in range(num_networks): - start = index * network_size - significant_bits = 32 - int(math.log(network_size, 2)) - cidr = "%s/%s" % (fixed_net[start], significant_bits) - project_net = IPy.IP(cidr) - net = {} - net['bridge'] = FLAGS.flat_network_bridge - net['cidr'] = cidr - net['netmask'] = str(project_net.netmask()) - net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast()) - net['dhcp_start'] = str(project_net[2]) - - if(FLAGS.use_ipv6): - cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6) - net['cidr_v6'] = cidr_v6 - - network_ref = self.db.network_create_safe(context, net) - - if network_ref: - self._create_fixed_ips(context, network_ref['id']) - - def get_network_host(self, context): - """Get the network host for the current context.""" - network_ref = self.db.network_get_by_bridge(context, - FLAGS.flat_network_bridge) - # NOTE(vish): If the network has no host, use the network_host flag. - # This could eventually be a a db lookup of some sort, but - # a flag is easy to handle for now. - host = network_ref['host'] - if not host: - topic = self.db.queue_get_for(context, - FLAGS.network_topic, - FLAGS.network_host) - if FLAGS.fake_call: - return self.set_network_host(context, network_ref['id']) - host = rpc.call(context, - FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) - return host - def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network.""" net = {} @@ -381,8 +400,24 @@ class FlatManager(NetworkManager): net['dns'] = FLAGS.flat_network_dns self.db.network_update(context, network_id, net) + def allocate_floating_ip(self, context, project_id): + #Fix for bug 723298 + raise NotImplementedError() + + def associate_floating_ip(self, context, floating_address, fixed_address): + #Fix for bug 723298 + raise NotImplementedError() + + def disassociate_floating_ip(self, context, floating_address): + #Fix for bug 723298 + raise NotImplementedError() + + def deallocate_floating_ip(self, context, floating_address): + #Fix for bug 723298 + raise NotImplementedError() + -class FlatDHCPManager(FlatManager): +class FlatDHCPManager(NetworkManager): """Flat networking with dhcp. FlatDHCPManager will start up one dhcp server to give out addresses. @@ -395,7 +430,6 @@ class FlatDHCPManager(FlatManager): standalone service. """ super(FlatDHCPManager, self).init_host() - self.driver.init_host() self.driver.metadata_forward() def setup_compute_network(self, context, instance_id): @@ -448,24 +482,11 @@ class VlanManager(NetworkManager): instances in its subnet. 
""" - def periodic_tasks(self, context=None): - """Tasks to be run at a periodic interval.""" - super(VlanManager, self).periodic_tasks(context) - now = datetime.datetime.utcnow() - timeout = FLAGS.fixed_ip_disassociate_timeout - time = now - datetime.timedelta(seconds=timeout) - num = self.db.fixed_ip_disassociate_all_by_timeout(context, - self.host, - time) - if num: - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) - def init_host(self): """Do any initialization that needs to be run if this is a standalone service. """ super(VlanManager, self).init_host() - self.driver.init_host() self.driver.metadata_forward() def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): @@ -501,9 +522,20 @@ class VlanManager(NetworkManager): network_ref['bridge']) def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, vlan_start, vpn_start): + cidr_v6, vlan_start, vpn_start, **kwargs): """Create networks based on parameters.""" + # Check that num_networks + vlan_start is not > 4094, fixes lp708025 + if num_networks + vlan_start > 4094: + raise ValueError(_('The sum between the number of networks and' + ' the vlan start cannot be greater' + ' than 4094')) + fixed_net = IPy.IP(cidr) + if fixed_net.len() < num_networks * network_size: + raise ValueError(_('The network range is not big enough to fit ' + '%(num_networks)s. Network size is %(network_size)s' % + locals())) + fixed_net_v6 = IPy.IP(cidr_v6) network_size_v6 = 1 << 64 significant_bits_v6 = 64 @@ -531,6 +563,16 @@ class VlanManager(NetworkManager): # NOTE(vish): This makes ports unique accross the cloud, a more # robust solution would be to make them unique per ip net['vpn_public_port'] = vpn_start + index + network_ref = None + try: + network_ref = db.network_get_by_cidr(context, cidr) + except exception.NotFound: + pass + + if network_ref is not None: + raise ValueError(_('Network with cidr %s already exists' % + cidr)) + network_ref = self.db.network_create_safe(context, net) if network_ref: self._create_fixed_ips(context, network_ref['id']) diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index 82767e52f..b213e18e8 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -107,7 +107,7 @@ class Bucket(object): def is_authorized(self, context): try: - return context.user.is_admin() or \ + return context.is_admin or \ self.owner_id == context.project_id except Exception, e: return False diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 41e0abd80..c90b5b54b 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -37,8 +37,7 @@ from nova.objectstore import bucket FLAGS = flags.FLAGS -flags.DEFINE_string('images_path', '$state_path/images', - 'path to decrypted images') +flags.DECLARE('images_path', 'nova.image.local') class Image(object): @@ -69,7 +68,7 @@ class Image(object): # but only modified by admin or owner. 
try: return (self.metadata['isPublic'] and readonly) or \ - context.user.is_admin() or \ + context.is_admin or \ self.metadata['imageOwnerId'] == context.project_id except: return False @@ -254,25 +253,34 @@ class Image(object): @staticmethod def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): - key, err = utils.execute( - 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, - process_input=encrypted_key, - check_exit_code=False) + key, err = utils.execute('openssl', + 'rsautl', + '-decrypt', + '-inkey', '%s' % cloud_private_key, + process_input=encrypted_key, + check_exit_code=False) if err: raise exception.Error(_("Failed to decrypt private key: %s") % err) - iv, err = utils.execute( - 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, - process_input=encrypted_iv, - check_exit_code=False) + iv, err = utils.execute('openssl', + 'rsautl', + '-decrypt', + '-inkey', '%s' % cloud_private_key, + process_input=encrypted_iv, + check_exit_code=False) if err: raise exception.Error(_("Failed to decrypt initialization " "vector: %s") % err) - _out, err = utils.execute( - 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' - % (encrypted_filename, key, iv, decrypted_filename), - check_exit_code=False) + _out, err = utils.execute('openssl', + 'enc', + '-d', + '-aes-128-cbc', + '-in', '%s' % (encrypted_filename,), + '-K', '%s' % (key,), + '-iv', '%s' % (iv,), + '-out', '%s' % (decrypted_filename,), + check_exit_code=False) if err: raise exception.Error(_("Failed to decrypt image file " "%(image_file)s: %(err)s") % diff --git a/nova/quota.py b/nova/quota.py index 3884eb308..6b52a97fa 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -35,6 +35,8 @@ flags.DEFINE_integer('quota_gigabytes', 1000, 'number of volume gigabytes allowed per project') flags.DEFINE_integer('quota_floating_ips', 10, 'number of floating ips allowed per project') +flags.DEFINE_integer('quota_metadata_items', 128, + 'number of metadata items allowed per instance') def get_quota(context, project_id): @@ -42,7 +44,8 @@ def get_quota(context, project_id): 'cores': FLAGS.quota_cores, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, - 'floating_ips': FLAGS.quota_floating_ips} + 'floating_ips': FLAGS.quota_floating_ips, + 'metadata_items': FLAGS.quota_metadata_items} try: quota = db.quota_get(context, project_id) for key in rval.keys(): @@ -94,6 +97,15 @@ def allowed_floating_ips(context, num_floating_ips): return min(num_floating_ips, allowed_floating_ips) +def allowed_metadata_items(context, num_metadata_items): + """Check quota; return min(num_metadata_items,allowed_metadata_items)""" + project_id = context.project_id + context = context.elevated() + quota = get_quota(context, project_id) + num_allowed_metadata_items = quota['metadata_items'] + return min(num_metadata_items, num_allowed_metadata_items) + + class QuotaError(exception.ApiError): """Quota Exceeeded""" pass diff --git a/nova/rpc.py b/nova/rpc.py index 01fc6d44b..fbb90299b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -29,6 +29,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +from eventlet import greenpool from eventlet import greenthread from nova import context @@ -42,11 +43,13 @@ from nova import utils FLAGS = flags.FLAGS LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') + class Connection(carrot_connection.BrokerConnection): """Connection instance object""" @classmethod 
- def instance(cls, new=False): + def instance(cls, new=True): """Returns the instance""" if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, @@ -88,18 +91,19 @@ class Consumer(messaging.Consumer): super(Consumer, self).__init__(*args, **kwargs) self.failed_connection = False break - except: # Catching all because carrot sucks + except Exception as e: # Catching all because carrot sucks fl_host = FLAGS.rabbit_host fl_port = FLAGS.rabbit_port fl_intv = FLAGS.rabbit_retry_interval - LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is" - " unreachable. Trying again in %(fl_intv)d seconds.") + LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is" + " unreachable: %(e)s. Trying again in %(fl_intv)d" + " seconds.") % locals()) self.failed_connection = True if self.failed_connection: - LOG.exception(_("Unable to connect to AMQP server " - "after %d tries. Shutting down."), - FLAGS.rabbit_max_retries) + LOG.error(_("Unable to connect to AMQP server " + "after %d tries. Shutting down."), + FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -119,7 +123,7 @@ class Consumer(messaging.Consumer): LOG.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't - # exceptions to be logged 10 times a second if some + # want exceptions to be logged 10 times a second if some # persistent failure occurs. except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: @@ -155,11 +159,15 @@ class AdapterConsumer(TopicConsumer): def __init__(self, connection=None, topic="broadcast", proxy=None): LOG.debug(_('Initing the Adapter Consumer for %s') % topic) self.proxy = proxy + self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + def receive(self, *args, **kwargs): + self.pool.spawn_n(self._receive, *args, **kwargs) + @exception.wrap_exception - def receive(self, message_data, message): + def _receive(self, message_data, message): """Magically looks for a method on the proxy object and calls it Message data should be a dictionary with two keys: @@ -246,7 +254,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance(True) + conn = Connection.instance() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -319,7 +327,7 @@ def call(context, topic, msg): self.result = data['result'] wait_msg = WaitMessage() - conn = Connection.instance(True) + conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) consumer.register_callback(wait_msg) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py new file mode 100644 index 000000000..2405f1343 --- /dev/null +++ b/nova/scheduler/api.py @@ -0,0 +1,49 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to schedulers. +""" + +from nova import flags +from nova import log as logging +from nova import rpc + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.scheduler.api') + + +class API(object): + """API for interacting with the scheduler.""" + + def _call_scheduler(self, method, context, params=None): + """Generic handler for RPC calls to the scheduler. + + :param params: Optional dictionary of arguments to be passed to the + scheduler worker + + :retval: Result returned by scheduler worker + """ + if not params: + params = {} + queue = FLAGS.scheduler_topic + kwargs = {'method': method, 'args': params} + return rpc.call(context, queue, kwargs) + + def get_zone_list(self, context): + items = self._call_scheduler('get_zone_list', context) + for item in items: + item['api_url'] = item['api_url'].replace('\\/', '/') + return items diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index e9b47512e..c94397210 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -29,6 +29,7 @@ from nova import log as logging from nova import manager from nova import rpc from nova import utils +from nova.scheduler import zone_manager LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS @@ -43,12 +44,21 @@ class SchedulerManager(manager.Manager): if not scheduler_driver: scheduler_driver = FLAGS.scheduler_driver self.driver = utils.import_object(scheduler_driver) + self.zone_manager = zone_manager.ZoneManager() super(SchedulerManager, self).__init__(*args, **kwargs) def __getattr__(self, key): """Converts all method calls to use the schedule method""" return functools.partial(self._schedule, key) + def periodic_tasks(self, context=None): + """Poll child zones periodically to get status.""" + self.zone_manager.ping(context) + + def get_zone_list(self, context=None): + """Get a list of zones from the ZoneManager.""" + return self.zone_manager.get_zone_list() + def _schedule(self, method, context, topic, *args, **kwargs): """Tries to call schedule_* method on the driver to retrieve host. diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py new file mode 100644 index 000000000..edf9000cc --- /dev/null +++ b/nova/scheduler/zone_manager.py @@ -0,0 +1,143 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +ZoneManager oversees all communications with child Zones. 
+""" + +import novaclient +import thread +import traceback + +from datetime import datetime +from eventlet import greenpool + +from nova import db +from nova import flags +from nova import log as logging + +FLAGS = flags.FLAGS +flags.DEFINE_integer('zone_db_check_interval', 60, + 'Seconds between getting fresh zone info from db.') +flags.DEFINE_integer('zone_failures_to_offline', 3, + 'Number of consecutive errors before marking zone offline') + + +class ZoneState(object): + """Holds the state of all connected child zones.""" + def __init__(self): + self.is_active = True + self.name = None + self.capabilities = None + self.attempt = 0 + self.last_seen = datetime.min + self.last_exception = None + self.last_exception_time = None + + def update_credentials(self, zone): + """Update zone credentials from db""" + self.zone_id = zone.id + self.api_url = zone.api_url + self.username = zone.username + self.password = zone.password + + def update_metadata(self, zone_metadata): + """Update zone metadata after successful communications with + child zone.""" + self.last_seen = datetime.now() + self.attempt = 0 + self.name = zone_metadata["name"] + self.capabilities = zone_metadata["capabilities"] + self.is_active = True + + def to_dict(self): + return dict(name=self.name, capabilities=self.capabilities, + is_active=self.is_active, api_url=self.api_url, + id=self.zone_id) + + def log_error(self, exception): + """Something went wrong. Check to see if zone should be + marked as offline.""" + self.last_exception = exception + self.last_exception_time = datetime.now() + api_url = self.api_url + logging.warning(_("'%(exception)s' error talking to " + "zone %(api_url)s") % locals()) + + max_errors = FLAGS.zone_failures_to_offline + self.attempt += 1 + if self.attempt >= max_errors: + self.is_active = False + logging.error(_("No answer from zone %(api_url)s " + "after %(max_errors)d " + "attempts. Marking inactive.") % locals()) + + +def _call_novaclient(zone): + """Call novaclient. Broken out for testing purposes.""" + client = novaclient.OpenStack(zone.username, zone.password, zone.api_url) + return client.zones.info()._info + + +def _poll_zone(zone): + """Eventlet worker to poll a zone.""" + logging.debug(_("Polling zone: %s") % zone.api_url) + try: + zone.update_metadata(_call_novaclient(zone)) + except Exception, e: + zone.log_error(traceback.format_exc()) + + +class ZoneManager(object): + """Keeps the zone states updated.""" + def __init__(self): + self.last_zone_db_check = datetime.min + self.zone_states = {} + self.green_pool = greenpool.GreenPool() + + def get_zone_list(self): + """Return the list of zones we know about.""" + return [zone.to_dict() for zone in self.zone_states.values()] + + def _refresh_from_db(self, context): + """Make our zone state map match the db.""" + # Add/update existing zones ... + zones = db.zone_get_all(context) + existing = self.zone_states.keys() + db_keys = [] + for zone in zones: + db_keys.append(zone.id) + if zone.id not in existing: + self.zone_states[zone.id] = ZoneState() + self.zone_states[zone.id].update_credentials(zone) + + # Cleanup zones removed from db ... 
+ keys = self.zone_states.keys() # since we're deleting + for zone_id in keys: + if zone_id not in db_keys: + del self.zone_states[zone_id] + + def _poll_zones(self, context): + """Try to connect to each child zone and get update.""" + self.green_pool.imap(_poll_zone, self.zone_states.values()) + + def ping(self, context=None): + """Ping should be called periodically to update zone status.""" + diff = datetime.now() - self.last_zone_db_check + if diff.seconds >= FLAGS.zone_db_check_interval: + logging.debug(_("Updating zone cache from db.")) + self.last_zone_db_check = datetime.now() + self._refresh_from_db(context) + self._poll_zones(context) diff --git a/nova/service.py b/nova/service.py index 59648adf2..af20db01c 100644 --- a/nova/service.py +++ b/nova/service.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -39,24 +40,24 @@ from nova import flags from nova import rpc from nova import utils from nova import version +from nova import wsgi FLAGS = flags.FLAGS flags.DEFINE_integer('report_interval', 10, 'seconds between nodes reporting state to datastore', lower_bound=1) - flags.DEFINE_integer('periodic_interval', 60, 'seconds between running periodic tasks', lower_bound=1) - -flags.DEFINE_string('pidfile', None, - 'pidfile to use for this service') - - -flags.DEFINE_flag(flags.HelpFlag()) -flags.DEFINE_flag(flags.HelpshortFlag()) -flags.DEFINE_flag(flags.HelpXMLFlag()) +flags.DEFINE_string('ec2_listen', "0.0.0.0", + 'IP address for EC2 API to listen') +flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') +flags.DEFINE_string('osapi_listen', "0.0.0.0", + 'IP address for OpenStack API to listen') +flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') +flags.DEFINE_string('api_paste_config', "api-paste.ini", + 'File name for the paste.deploy config for nova-api') class Service(object): @@ -68,6 +69,8 @@ class Service(object): self.binary = binary self.topic = topic self.manager_class_name = manager + manager_class = utils.import_class(self.manager_class_name) + self.manager = manager_class(host=self.host, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval super(Service, self).__init__(*args, **kwargs) @@ -75,9 +78,9 @@ class Service(object): self.timers = [] def start(self): - manager_class = utils.import_class(self.manager_class_name) - self.manager = manager_class(host=self.host, *self.saved_args, - **self.saved_kwargs) + vcs_string = version.version_string_with_vcs() + logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"), + {'topic': self.topic, 'vcs_string': vcs_string}) self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() @@ -157,9 +160,6 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - vcs_string = version.version_string_with_vcs() - logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)") - % locals()) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -181,6 +181,13 @@ class Service(object): pass self.timers = [] + def wait(self): + for x in self.timers: + try: + x.wait() + except Exception: + pass + def periodic_tasks(self): """Tasks to be run at a periodic 
interval""" self.manager.periodic_tasks(context.get_admin_context()) @@ -213,12 +220,55 @@ class Service(object): logging.exception(_("model server went away")) -def serve(*services): - FLAGS(sys.argv) - logging.basicConfig() +class WsgiService(object): + """Base class for WSGI based services. + + For each api you define, you must also define these flags: + :<api>_listen: The address on which to listen + :<api>_listen_port: The port on which to listen + """ - if not services: - services = [Service.create()] + def __init__(self, conf, apis): + self.conf = conf + self.apis = apis + self.wsgi_app = None + + def start(self): + self.wsgi_app = _run_wsgi(self.conf, self.apis) + + def wait(self): + self.wsgi_app.wait() + + +class ApiService(WsgiService): + """Class for our nova-api service""" + @classmethod + def create(cls, conf=None): + if not conf: + conf = wsgi.paste_config_file(FLAGS.api_paste_config) + if not conf: + message = (_("No paste configuration found for: %s"), + FLAGS.api_paste_config) + raise exception.Error(message) + api_endpoints = ['ec2', 'osapi'] + service = cls(conf, api_endpoints) + return service + + +def serve(*services): + try: + if not services: + services = [Service.create()] + except Exception: + logging.exception('in Service.create()') + raise + finally: + # After we've loaded up all our dynamic bits, check + # whether we should print help + flags.DEFINE_flag(flags.HelpFlag()) + flags.DEFINE_flag(flags.HelpshortFlag()) + flags.DEFINE_flag(flags.HelpXMLFlag()) + FLAGS.ParseNewFlags() name = '_'.join(x.binary for x in services) logging.debug(_("Serving %s"), name) @@ -234,3 +284,46 @@ def serve(*services): def wait(): while True: greenthread.sleep(5) + + +def serve_wsgi(cls, conf=None): + try: + service = cls.create(conf) + except Exception: + logging.exception('in WsgiService.create()') + raise + finally: + # After we've loaded up all our dynamic bits, check + # whether we should print help + flags.DEFINE_flag(flags.HelpFlag()) + flags.DEFINE_flag(flags.HelpshortFlag()) + flags.DEFINE_flag(flags.HelpXMLFlag()) + FLAGS.ParseNewFlags() + + service.start() + + return service + + +def _run_wsgi(paste_config_file, apis): + logging.debug(_("Using paste.deploy config at: %s"), paste_config_file) + apps = [] + for api in apis: + config = wsgi.load_paste_configuration(paste_config_file, api) + if config is None: + logging.debug(_("No paste configuration for app: %s"), api) + continue + logging.debug(_("App Config: %(api)s\n%(config)r") % locals()) + logging.info(_("Running %s API"), api) + app = wsgi.load_paste_app(paste_config_file, api) + apps.append((app, getattr(FLAGS, "%s_listen_port" % api), + getattr(FLAGS, "%s_listen" % api))) + if len(apps) == 0: + logging.error(_("No known API applications configured in %s."), + paste_config_file) + return + + server = wsgi.Server() + for app in apps: + server.start(*app) + return server diff --git a/nova/test.py b/nova/test.py index a12cf9d32..d8a47464f 100644 --- a/nova/test.py +++ b/nova/test.py @@ -22,10 +22,15 @@ Allows overriding of flags for use of fakes, and some black magic for inline callbacks. 
""" + import datetime +import os +import shutil +import uuid import unittest import mox +import shutil import stubout from nova import context @@ -33,13 +38,12 @@ from nova import db from nova import fakerabbit from nova import flags from nova import rpc -from nova.network import manager as network_manager -from nova.tests import fake_flags +from nova import service FLAGS = flags.FLAGS -flags.DEFINE_bool('flush_db', True, - 'Flush the database before running fake tests') +flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite', + 'File name of clean sqlite db') flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') @@ -64,15 +68,8 @@ class TestCase(unittest.TestCase): # now that we have some required db setup for the system # to work properly. self.start = datetime.datetime.utcnow() - ctxt = context.get_admin_context() - if db.network_count(ctxt) != 5: - network_manager.VlanManager().create_networks(ctxt, - FLAGS.fixed_range, - 5, 16, - FLAGS.fixed_range_v6, - FLAGS.vlan_start, - FLAGS.vpn_start, - ) + shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), + os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators @@ -80,6 +77,7 @@ class TestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} self.injected = [] + self._services = [] self._monkey_patch_attach() self._original_flags = FLAGS.FlagValuesDict() @@ -91,25 +89,31 @@ class TestCase(unittest.TestCase): self.stubs.UnsetAll() self.stubs.SmartUnsetAll() self.mox.VerifyAll() - # NOTE(vish): Clean up any ips associated during the test. - ctxt = context.get_admin_context() - db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, - self.start) - db.network_disassociate_all(ctxt) + super(TestCase, self).tearDown() + finally: + # Clean out fake_rabbit's queue if we used it + if FLAGS.fake_rabbit: + fakerabbit.reset_all() + + # Reset any overriden flags + self.reset_flags() + + # Reset our monkey-patches rpc.Consumer.attach_to_eventlet = self.originalAttach + + # Stop any timers for x in self.injected: try: x.stop() except AssertionError: pass - if FLAGS.fake_rabbit: - fakerabbit.reset_all() - - db.security_group_destroy_all(ctxt) - super(TestCase, self).tearDown() - finally: - self.reset_flags() + # Kill any services + for x in self._services: + try: + x.kill() + except Exception: + pass def flags(self, **kw): """Override flag variables for a test""" @@ -127,6 +131,15 @@ class TestCase(unittest.TestCase): for k, v in self._original_flags.iteritems(): setattr(FLAGS, k, v) + def start_service(self, name, host=None, **kwargs): + host = host and host or uuid.uuid4().hex + kwargs.setdefault('host', host) + kwargs.setdefault('binary', 'nova-%s' % name) + svc = service.Service.create(**kwargs) + svc.start() + self._services.append(svc) + return svc + def _monkey_patch_attach(self): self.originalAttach = rpc.Consumer.attach_to_eventlet diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 592d5bea9..7fba02a93 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -37,5 +37,30 @@ setattr(__builtin__, '_', lambda x: x) def setup(): + import os + import shutil + + from nova import context + from nova import flags from nova.db import migration + from nova.network import manager as network_manager + from nova.tests import fake_flags + + FLAGS = flags.FLAGS + + testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) + if os.path.exists(testdb): + 
os.unlink(testdb) migration.db_sync() + ctxt = context.get_admin_context() + network_manager.VlanManager().create_networks(ctxt, + FLAGS.fixed_range, + FLAGS.num_networks, + FLAGS.network_size, + FLAGS.fixed_range_v6, + FLAGS.vlan_start, + FLAGS.vpn_start, + ) + + cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) + shutil.copyfile(testdb, cleandb) diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 14eaaa62c..e18120285 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -16,7 +16,7 @@ # under the License. import webob.dec -import unittest +from nova import test from nova import context from nova import flags @@ -33,7 +33,7 @@ def simple_wsgi(req): return "" -class RateLimitingMiddlewareTest(unittest.TestCase): +class RateLimitingMiddlewareTest(test.TestCase): def test_get_action_name(self): middleware = RateLimitingMiddleware(simple_wsgi) @@ -92,31 +92,3 @@ class RateLimitingMiddlewareTest(unittest.TestCase): self.assertEqual(middleware.limiter.__class__.__name__, "Limiter") middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar') self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy") - - -class LimiterTest(unittest.TestCase): - - def test_limiter(self): - items = range(2000) - req = Request.blank('/') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=0') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=3') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=2005') - self.assertEqual(limited(items, req), []) - req = Request.blank('/?limit=10') - self.assertEqual(limited(items, req), items[:10]) - req = Request.blank('/?limit=0') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?limit=3000') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=1&limit=3') - self.assertEqual(limited(items, req), items[1:4]) - req = Request.blank('/?offset=3&limit=0') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=3&limit=1500') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=3000&limit=10') - self.assertEqual(limited(items, req), []) diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py new file mode 100644 index 000000000..74bb8729a --- /dev/null +++ b/nova/tests/api/openstack/common.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
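# NOTE: taken together, the nova/test.py and nova/tests/__init__.py changes
# above replace per-test table cleanup with a snapshot/restore scheme for the
# sqlite test database. A minimal sketch of the pattern; the helper names
# below are illustrative, not part of the patch:
import os
import shutil

def make_clean_db(state_path, db_name, clean_name, build_schema):
    """Run migrations once, then snapshot the pristine database."""
    testdb = os.path.join(state_path, db_name)
    if os.path.exists(testdb):
        os.unlink(testdb)
    build_schema()  # e.g. migration.db_sync() plus seeding the test networks
    shutil.copyfile(testdb, os.path.join(state_path, clean_name))

def restore_clean_db(state_path, db_name, clean_name):
    """Copy the snapshot back into place before each test, which is much
    faster than re-running migrations or cleaning up rows by hand."""
    shutil.copyfile(os.path.join(state_path, clean_name),
                    os.path.join(state_path, db_name))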
+ +import json + +import webob + + +def webob_factory(url): + """Factory for removing duplicate webob code from tests""" + + base_url = url + + def web_request(url, method=None, body=None): + req = webob.Request.blank("%s%s" % (base_url, url)) + if method: + req.content_type = "application/json" + req.method = method + if body: + req.body = json.dumps(body) + return req + return web_request diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index fb282f1c9..2c4e57246 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -25,6 +25,7 @@ import webob.dec from paste import urlmap from glance import client as glance_client +from glance.common import exception as glance_exc from nova import auth from nova import context @@ -149,25 +150,26 @@ def stub_out_glance(stubs, initial_fixtures=None): for f in self.fixtures: if f['id'] == image_id: return f - return None + raise glance_exc.NotFound - def fake_add_image(self, image_meta): + def fake_add_image(self, image_meta, data=None): id = ''.join(random.choice(string.letters) for _ in range(20)) image_meta['id'] = id self.fixtures.append(image_meta) - return id + return image_meta - def fake_update_image(self, image_id, image_meta): + def fake_update_image(self, image_id, image_meta, data=None): f = self.fake_get_image_meta(image_id) if not f: - raise exc.NotFound + raise glance_exc.NotFound f.update(image_meta) + return f def fake_delete_image(self, image_id): f = self.fake_get_image_meta(image_id) if not f: - raise exc.NotFound + raise glance_exc.NotFound self.fixtures.remove(f) @@ -188,7 +190,11 @@ def stub_out_glance(stubs, initial_fixtures=None): class FakeToken(object): + id = 0 + def __init__(self, **kwargs): + FakeToken.id += 1 + self.id = FakeToken.id for k, v in kwargs.iteritems(): setattr(self, k, v) @@ -203,19 +209,22 @@ class FakeAuthDatabase(object): data = {} @staticmethod - def auth_get_token(context, token_hash): + def auth_token_get(context, token_hash): return FakeAuthDatabase.data.get(token_hash, None) @staticmethod - def auth_create_token(context, token): + def auth_token_create(context, token): fake_token = FakeToken(created_at=datetime.datetime.now(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token + FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token @staticmethod - def auth_destroy_token(context, token): - if token.token_hash in FakeAuthDatabase.data: - del FakeAuthDatabase.data['token_hash'] + def auth_token_destroy(context, token_id): + token = FakeAuthDatabase.data.get('id_%i' % token_id) + if token and token.token_hash in FakeAuthDatabase.data: + del FakeAuthDatabase.data[token.token_hash] + del FakeAuthDatabase.data['id_%i' % token_id] class FakeAuthManager(object): diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py index 73120c31d..dfce1b127 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -15,13 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. 
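# NOTE: the webob_factory helper added in common.py above is consumed later
# in this patch by test_servers.py; a short usage sketch:
from nova.tests.api.openstack import common

webreq = common.webob_factory('/v1.0/servers')
# Builds a webob.Request for POST /v1.0/servers/1/action with a JSON body.
req = webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))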
-import unittest import stubout import webob from paste import urlmap from nova import flags +from nova import test from nova.api import openstack from nova.api.openstack import ratelimiting from nova.api.openstack import auth @@ -30,9 +30,10 @@ from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS -class AdminAPITest(unittest.TestCase): +class AdminAPITest(test.TestCase): def setUp(self): + super(AdminAPITest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -44,6 +45,7 @@ class AdminAPITest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(AdminAPITest, self).tearDown() def test_admin_enabled(self): FLAGS.allow_admin_api = True @@ -58,8 +60,5 @@ class AdminAPITest(unittest.TestCase): # We should still be able to access public operations. req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are unavailable. - -if __name__ == '__main__': - unittest.main() + self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index db0fe1060..5112c486f 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -15,17 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import webob.exc import webob.dec from webob import Request +from nova import test from nova.api import openstack from nova.api.openstack import faults -class APITest(unittest.TestCase): +class APITest(test.TestCase): def _wsgi_app(self, inner_app): # simpler version of the app than fakes.wsgi_app diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 0dd65d321..ff8d42a14 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -16,7 +16,6 @@ # under the License. 
import datetime -import unittest import stubout import webob @@ -27,12 +26,15 @@ import nova.api.openstack.auth import nova.auth.manager from nova import auth from nova import context +from nova import db +from nova import test from nova.tests.api.openstack import fakes -class Test(unittest.TestCase): +class Test(test.TestCase): def setUp(self): + super(Test, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) @@ -45,6 +47,7 @@ class Test(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() fakes.fake_data_store = {} + super(Test, self).tearDown() def test_authorize_user(self): f = fakes.FakeAuthManager() @@ -97,10 +100,10 @@ class Test(unittest.TestCase): token_hash=token_hash, created_at=datetime.datetime(1990, 1, 1)) - self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token', + self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_destroy', destroy_token_mock) - self.stubs.Set(fakes.FakeAuthDatabase, 'auth_get_token', + self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_get', bad_token) req = webob.Request.blank('/v1.0/') @@ -128,8 +131,36 @@ class Test(unittest.TestCase): self.assertEqual(result.status, '401 Unauthorized') -class TestLimiter(unittest.TestCase): +class TestFunctional(test.TestCase): + def test_token_expiry(self): + ctx = context.get_admin_context() + tok = db.auth_token_create(ctx, dict( + token_hash='bacon', + cdn_management_url='', + server_management_url='', + storage_url='', + user_id='ham', + )) + + db.auth_token_update(ctx, tok.token_hash, dict( + created_at=datetime.datetime(2000, 1, 1, 12, 0, 0), + )) + + req = webob.Request.blank('/v1.0/') + req.headers['X-Auth-Token'] = 'bacon' + result = req.get_response(fakes.wsgi_app()) + self.assertEqual(result.status, '401 Unauthorized') + + def test_token_doesnotexist(self): + req = webob.Request.blank('/v1.0/') + req.headers['X-Auth-Token'] = 'ham' + result = req.get_response(fakes.wsgi_app()) + self.assertEqual(result.status, '401 Unauthorized') + + +class TestLimiter(test.TestCase): def setUp(self): + super(TestLimiter, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) @@ -141,6 +172,7 @@ class TestLimiter(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() fakes.fake_data_store = {} + super(TestLimiter, self).tearDown() def test_authorize_token(self): f = fakes.FakeAuthManager() @@ -161,7 +193,3 @@ class TestLimiter(unittest.TestCase): result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py new file mode 100644 index 000000000..8f57c5b67 --- /dev/null +++ b/nova/tests/api/openstack/test_common.py @@ -0,0 +1,171 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test suites for 'common' code used throughout the OpenStack HTTP API. +""" + +import webob.exc + +from webob import Request + +from nova import test +from nova.api.openstack.common import limited + + +class LimiterTest(test.TestCase): + """ + Unit tests for the `nova.api.openstack.common.limited` method which takes + in a list of items and, depending on the 'offset' and 'limit' GET params, + returns a subset or complete set of the given items. + """ + + def setUp(self): + """ + Run before each test. + """ + super(LimiterTest, self).setUp() + self.tiny = range(1) + self.small = range(10) + self.medium = range(1000) + self.large = range(10000) + + def test_limiter_offset_zero(self): + """ + Test offset key works with 0. + """ + req = Request.blank('/?offset=0') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_offset_medium(self): + """ + Test offset key works with a medium sized number. + """ + req = Request.blank('/?offset=10') + self.assertEqual(limited(self.tiny, req), []) + self.assertEqual(limited(self.small, req), self.small[10:]) + self.assertEqual(limited(self.medium, req), self.medium[10:]) + self.assertEqual(limited(self.large, req), self.large[10:1010]) + + def test_limiter_offset_over_max(self): + """ + Test offset key works with a number over 1000 (max_limit). + """ + req = Request.blank('/?offset=1001') + self.assertEqual(limited(self.tiny, req), []) + self.assertEqual(limited(self.small, req), []) + self.assertEqual(limited(self.medium, req), []) + self.assertEqual(limited(self.large, req), self.large[1001:2001]) + + def test_limiter_offset_blank(self): + """ + Test offset key works with a blank offset. + """ + req = Request.blank('/?offset=') + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + + def test_limiter_offset_bad(self): + """ + Test offset key works with a BAD offset. + """ + req = Request.blank(u'/?offset=\u0020aa') + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + + def test_limiter_nothing(self): + """ + Test request with no offset or limit + """ + req = Request.blank('/') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_zero(self): + """ + Test limit of zero. + """ + req = Request.blank('/?limit=0') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_medium(self): + """ + Test limit of 10. + """ + req = Request.blank('/?limit=10') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium[:10]) + self.assertEqual(limited(self.large, req), self.large[:10]) + + def test_limiter_limit_over_max(self): + """ + Test limit of 3000. 
+ """ + req = Request.blank('/?limit=3000') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_and_offset(self): + """ + Test request with both limit and offset. + """ + items = range(2000) + req = Request.blank('/?offset=1&limit=3') + self.assertEqual(limited(items, req), items[1:4]) + req = Request.blank('/?offset=3&limit=0') + self.assertEqual(limited(items, req), items[3:1003]) + req = Request.blank('/?offset=3&limit=1500') + self.assertEqual(limited(items, req), items[3:1003]) + req = Request.blank('/?offset=3000&limit=10') + self.assertEqual(limited(items, req), []) + + def test_limiter_custom_max_limit(self): + """ + Test a max_limit other than 1000. + """ + items = range(2000) + req = Request.blank('/?offset=1&limit=3') + self.assertEqual(limited(items, req, max_limit=2000), items[1:4]) + req = Request.blank('/?offset=3&limit=0') + self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + req = Request.blank('/?offset=3&limit=2500') + self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + req = Request.blank('/?offset=3000&limit=10') + self.assertEqual(limited(items, req, max_limit=2000), []) + + def test_limiter_negative_limit(self): + """ + Test a negative limit. + """ + req = Request.blank('/?limit=-3000') + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) + + def test_limiter_negative_offset(self): + """ + Test a negative offset. + """ + req = Request.blank('/?offset=-30') + self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req) diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py index fda2b5ede..7667753f4 100644 --- a/nova/tests/api/openstack/test_faults.py +++ b/nova/tests/api/openstack/test_faults.py @@ -15,15 +15,15 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import webob import webob.dec import webob.exc +from nova import test from nova.api.openstack import faults -class TestFaults(unittest.TestCase): +class TestFaults(test.TestCase): def test_fault_parts(self): req = webob.Request.blank('/.xml') diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 1bdaea161..319767bb5 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -15,34 +15,38 @@ # License for the specific language governing permissions and limitations # under the License. 
-import unittest - import stubout import webob +from nova import test import nova.api +from nova import context +from nova import db from nova.api.openstack import flavors from nova.tests.api.openstack import fakes -class FlavorsTest(unittest.TestCase): +class FlavorsTest(test.TestCase): def setUp(self): + super(FlavorsTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) + self.context = context.get_admin_context() def tearDown(self): self.stubs.UnsetAll() + super(FlavorsTest, self).tearDown() def test_get_flavor_list(self): req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) def test_get_flavor_by_id(self): - pass - -if __name__ == '__main__': - unittest.main() + req = webob.Request.blank('/v1.0/flavors/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 8ab4d7569..eb5039bdb 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,8 @@ and as a WSGI layer import json import datetime -import unittest +import shutil +import tempfile import stubout import webob @@ -30,6 +31,7 @@ import webob from nova import context from nova import exception from nova import flags +from nova import test from nova import utils import nova.api.openstack from nova.api.openstack import images @@ -54,7 +56,7 @@ class BaseImageServiceTests(object): num_images = len(self.service.index(self.context)) - id = self.service.create(self.context, fixture) + id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, id) self.assertEquals(num_images + 1, @@ -71,7 +73,7 @@ class BaseImageServiceTests(object): num_images = len(self.service.index(self.context)) - id = self.service.create(self.context, fixture) + id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, id) @@ -89,7 +91,7 @@ class BaseImageServiceTests(object): 'instance_id': None, 'progress': None} - id = self.service.create(self.context, fixture) + id = self.service.create(self.context, fixture)['id'] fixture['status'] = 'in progress' @@ -118,7 +120,7 @@ class BaseImageServiceTests(object): ids = [] for fixture in fixtures: - new_id = self.service.create(self.context, fixture) + new_id = self.service.create(self.context, fixture)['id'] ids.append(new_id) num_images = len(self.service.index(self.context)) @@ -130,29 +132,33 @@ class BaseImageServiceTests(object): self.assertEquals(1, num_images) -class LocalImageServiceTest(unittest.TestCase, +class LocalImageServiceTest(test.TestCase, BaseImageServiceTests): """Tests the local image service""" def setUp(self): + super(LocalImageServiceTest, self).setUp() + self.tempdir = tempfile.mkdtemp() + self.flags(images_path=self.tempdir) self.stubs = stubout.StubOutForTesting() service_class = 'nova.image.local.LocalImageService' self.service = utils.import_object(service_class) self.context = context.RequestContext(None, None) def tearDown(self): - self.service.delete_all() - self.service.delete_imagedir() + shutil.rmtree(self.tempdir) self.stubs.UnsetAll() + super(LocalImageServiceTest, self).tearDown() -class GlanceImageServiceTest(unittest.TestCase, +class GlanceImageServiceTest(test.TestCase, BaseImageServiceTests): """Tests 
the local image service""" def setUp(self): + super(GlanceImageServiceTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.stub_out_glance(self.stubs) fakes.stub_out_compute_api_snapshot(self.stubs) @@ -163,9 +169,10 @@ class GlanceImageServiceTest(unittest.TestCase, def tearDown(self): self.stubs.UnsetAll() + super(GlanceImageServiceTest, self).tearDown() -class ImageControllerWithGlanceServiceTest(unittest.TestCase): +class ImageControllerWithGlanceServiceTest(test.TestCase): """Test of the OpenStack API /images application controller""" @@ -194,6 +201,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): 'image_type': 'ramdisk'}] def setUp(self): + super(ImageControllerWithGlanceServiceTest, self).setUp() self.orig_image_service = FLAGS.image_service FLAGS.image_service = 'nova.image.glance.GlanceImageService' self.stubs = stubout.StubOutForTesting() @@ -208,6 +216,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.image_service = self.orig_image_service + super(ImageControllerWithGlanceServiceTest, self).tearDown() def test_get_image_index(self): req = webob.Request.blank('/v1.0/images') diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py index 4c9d6bc23..9ae90ee20 100644 --- a/nova/tests/api/openstack/test_ratelimiting.py +++ b/nova/tests/api/openstack/test_ratelimiting.py @@ -1,15 +1,16 @@ import httplib import StringIO import time -import unittest import webob +from nova import test import nova.api.openstack.ratelimiting as ratelimiting -class LimiterTest(unittest.TestCase): +class LimiterTest(test.TestCase): def setUp(self): + super(LimiterTest, self).setUp() self.limits = { 'a': (5, ratelimiting.PER_SECOND), 'b': (5, ratelimiting.PER_MINUTE), @@ -83,9 +84,10 @@ class FakeLimiter(object): return self._delay -class WSGIAppTest(unittest.TestCase): +class WSGIAppTest(test.TestCase): def setUp(self): + super(WSGIAppTest, self).setUp() self.limiter = FakeLimiter(self) self.app = ratelimiting.WSGIApp(self.limiter) @@ -206,7 +208,7 @@ def wire_HTTPConnection_to_WSGI(host, app): httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) -class WSGIAppProxyTest(unittest.TestCase): +class WSGIAppProxyTest(test.TestCase): def setUp(self): """Our WSGIAppProxy is going to call across an HTTPConnection to a @@ -218,6 +220,7 @@ class WSGIAppProxyTest(unittest.TestCase): at the WSGIApp. And the limiter isn't real -- it's a fake that behaves the way we tell it to. """ + super(WSGIAppProxyTest, self).setUp() self.limiter = FakeLimiter(self) app = ratelimiting.WSGIApp(self.limiter) wire_HTTPConnection_to_WSGI('100.100.100.100:80', app) @@ -238,7 +241,3 @@ class WSGIAppProxyTest(unittest.TestCase): self.limiter.mock('murder', 'brutus', None) self.proxy.perform('stab', 'brutus') self.assertRaises(AssertionError, shouldRaise) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 724f14f19..c1e05b18a 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1,6 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC. +# Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,19 +15,23 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime import json -import unittest import stubout import webob from nova import db from nova import flags +from nova import test import nova.api.openstack from nova.api.openstack import servers +import nova.compute.api import nova.db.api from nova.db.sqlalchemy.models import Instance +from nova.db.sqlalchemy.models import InstanceMetadata import nova.rpc +from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes @@ -39,6 +43,13 @@ def return_server(context, id): return stub_instance(id) +def return_server_with_addresses(private, public): + def _return_server(context, id): + return stub_instance(id, private_address=private, + public_addresses=public) + return _return_server + + def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] @@ -55,18 +66,59 @@ def instance_address(context, instance_id): return None -def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id) +def stub_instance(id, user_id=1, private_address=None, public_addresses=None): + metadata = [] + metadata.append(InstanceMetadata(key='seq', value=id)) + + if public_addresses == None: + public_addresses = list() + + instance = { + "id": id, + "admin_pass": "", + "user_id": user_id, + "project_id": "", + "image_id": 10, + "kernel_id": "", + "ramdisk_id": "", + "launch_index": 0, + "key_name": "", + "key_data": "", + "state": 0, + "state_description": "", + "memory_mb": 0, + "vcpus": 0, + "local_gb": 0, + "hostname": "", + "host": None, + "instance_type": "", + "user_data": "", + "reservation_id": "", + "mac_address": "", + "scheduled_at": datetime.datetime.now(), + "launched_at": datetime.datetime.now(), + "terminated_at": datetime.datetime.now(), + "availability_zone": "", + "display_name": "server%s" % id, + "display_description": "", + "locked": False, + "metadata": metadata} + + instance["fixed_ip"] = { + "address": private_address, + "floating_ips": [{"address":ip} for ip in public_addresses]} + + return instance def fake_compute_api(cls, req, id): return True -class ServersTest(unittest.TestCase): +class ServersTest(test.TestCase): def setUp(self): + super(ServersTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -94,9 +146,12 @@ class ServersTest(unittest.TestCase): self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api) self.allow_admin = FLAGS.allow_admin_api + self.webreq = common.webob_factory('/v1.0/servers') + def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(ServersTest, self).tearDown() def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') @@ -105,6 +160,22 @@ class ServersTest(unittest.TestCase): self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['name'], 'server1') + def test_get_server_by_id_with_addresses(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], '1') + self.assertEqual(res_dict['server']['name'], 'server1') + addresses = res_dict['server']['addresses'] + self.assertEqual(len(addresses["public"]), len(public)) + self.assertEqual(addresses["public"][0], public[0]) + 
self.assertEqual(len(addresses["private"]), 1) + self.assertEqual(addresses["private"][0], private) + def test_get_server_list(self): req = webob.Request.blank('/v1.0/servers') res = req.get_response(fakes.wsgi_app()) @@ -117,9 +188,37 @@ class ServersTest(unittest.TestCase): self.assertEqual(s.get('imageId', None), None) i += 1 + def test_get_servers_with_limit(self): + req = webob.Request.blank('/v1.0/servers?limit=3') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [0, 1, 2]) + + req = webob.Request.blank('/v1.0/servers?limit=aaa') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue('limit' in res.body) + + def test_get_servers_with_offset(self): + req = webob.Request.blank('/v1.0/servers?offset=2') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [2, 3, 4]) + + req = webob.Request.blank('/v1.0/servers?offset=aaa') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue('offset' in res.body) + + def test_get_servers_with_limit_and_offset(self): + req = webob.Request.blank('/v1.0/servers?limit=2&offset=1') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [1, 2]) + def test_create_instance(self): def instance_create(context, inst): - return {'id': '1', 'display_name': ''} + return {'id': '1', 'display_name': 'server_test'} def server_update(context, id, params): return instance_create(context, id) @@ -154,14 +253,22 @@ class ServersTest(unittest.TestCase): "get_image_id_from_image_hash", image_id_from_hash) body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, + name='server_test', imageId=2, flavorId=2, + metadata={'hello': 'world', 'open': 'stack'}, personality={})) req = webob.Request.blank('/v1.0/servers') req.method = 'POST' req.body = json.dumps(body) + req.headers["Content-Type"] = "application/json" res = req.get_response(fakes.wsgi_app()) + server = json.loads(res.body)['server'] + self.assertEqual('serv', server['adminPass'][:4]) + self.assertEqual(16, len(server['adminPass'])) + self.assertEqual('server_test', server['name']) + self.assertEqual('1', server['id']) + self.assertEqual(res.status_int, 200) def test_update_no_body(self): @@ -229,10 +336,45 @@ class ServersTest(unittest.TestCase): i = 0 for s in res_dict['servers']: self.assertEqual(s['id'], i) + self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) self.assertEqual(s['imageId'], 10) + self.assertEqual(s['metadata']['seq'], i) i += 1 + def test_get_all_server_details_with_host(self): + ''' + We want to make sure that if two instances are on the same host, then + they return the same hostId. If two instances are on different hosts, + they should return different hostId's. In this test, there are 5 + instances - 2 on one host and 3 on another. 
+ ''' + + def stub_instance(id, user_id=1): + return Instance(id=id, state=0, image_id=10, user_id=user_id, + display_name='server%s' % id, host='host%s' % (id % 2)) + + def return_servers_with_host(context, user_id=1): + return [stub_instance(i) for i in xrange(5)] + + self.stubs.Set(nova.db.api, 'instance_get_all_by_user', + return_servers_with_host) + + req = webob.Request.blank('/v1.0/servers/detail') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + server_list = res_dict['servers'] + host_ids = [server_list[0]['hostId'], server_list[1]['hostId']] + self.assertTrue(host_ids[0] and host_ids[1]) + self.assertNotEqual(host_ids[0], host_ids[1]) + + for i, s in enumerate(res_dict['servers']): + self.assertEqual(s['id'], i) + self.assertEqual(s['hostId'], host_ids[i % 2]) + self.assertEqual(s['name'], 'server%d' % i) + self.assertEqual(s['imageId'], 10) + def test_server_pause(self): FLAGS.allow_admin_api = True body = dict(server=dict( @@ -281,6 +423,30 @@ class ServersTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) + def test_server_reset_network(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/reset_network') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_server_inject_network_info(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/inject_network_info') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + def test_server_diagnostics(self): req = webob.Request.blank("/v1.0/servers/1/diagnostics") req.method = "GET" @@ -339,6 +505,98 @@ class ServersTest(unittest.TestCase): self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) + def test_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_resize_bad_flavor_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 422) + self.assertEqual(self.resize_called, False) + + def test_resize_raises_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) + + def resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_confirm_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + + self.resize_called = False + + def confirm_resize_mock(*args): + self.resize_called = True + + 
self.stubs.Set(nova.compute.api.API, 'confirm_resize', + confirm_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) + self.assertEqual(self.resize_called, True) + + def test_confirm_resize_server_fails(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + + def confirm_resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'confirm_resize', + confirm_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_revert_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) + + self.resize_called = False + + def revert_resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_revert_resize_server_fails(self): + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) + + def revert_resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) if __name__ == "__main__": unittest.main() diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py index c2fc3a203..b4de2ef41 100644 --- a/nova/tests/api/openstack/test_shared_ip_groups.py +++ b/nova/tests/api/openstack/test_shared_ip_groups.py @@ -15,19 +15,20 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest - import stubout +from nova import test from nova.api.openstack import shared_ip_groups -class SharedIpGroupsTest(unittest.TestCase): +class SharedIpGroupsTest(test.TestCase): def setUp(self): + super(SharedIpGroupsTest, self).setUp() self.stubs = stubout.StubOutForTesting() def tearDown(self): self.stubs.UnsetAll() + super(SharedIpGroupsTest, self).tearDown() def test_get_shared_ip_groups(self): pass diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py new file mode 100644 index 000000000..4f4fabf12 --- /dev/null +++ b/nova/tests/api/openstack/test_zones.py @@ -0,0 +1,169 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
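+ +# The zone_* helpers below are canned stand-ins for the nova.db zone +# calls; they are installed with stubs.Set so no database is touched.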
+ + +import stubout +import webob +import json + +import nova.db +from nova import context +from nova import flags +from nova import test +from nova.api.openstack import zones +from nova.tests.api.openstack import fakes +from nova.scheduler import api + + +FLAGS = flags.FLAGS +FLAGS.verbose = True + + +def zone_get(context, zone_id): + return dict(id=1, api_url='http://example.com', username='bob', + password='xxx') + + +def zone_create(context, values): + zone = dict(id=1) + zone.update(values) + return zone + + +def zone_update(context, zone_id, values): + zone = dict(id=zone_id, api_url='http://example.com', username='bob', + password='xxx') + zone.update(values) + return zone + + +def zone_delete(context, zone_id): + pass + + +def zone_get_all_scheduler(*args): + return [ + dict(id=1, api_url='http://example.com', username='bob', + password='xxx'), + dict(id=2, api_url='http://example.org', username='alice', + password='qwerty') + ] + + +def zone_get_all_scheduler_empty(*args): + return [] + + +def zone_get_all_db(context): + return [ + dict(id=1, api_url='http://example.com', username='bob', + password='xxx'), + dict(id=2, api_url='http://example.org', username='alice', + password='qwerty') + ] + + +class ZonesTest(test.TestCase): + def setUp(self): + super(ZonesTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + + self.allow_admin = FLAGS.allow_admin_api + FLAGS.allow_admin_api = True + + self.stubs.Set(nova.db, 'zone_get', zone_get) + self.stubs.Set(nova.db, 'zone_update', zone_update) + self.stubs.Set(nova.db, 'zone_create', zone_create) + self.stubs.Set(nova.db, 'zone_delete', zone_delete) + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin + super(ZonesTest, self).tearDown() + + def test_get_zone_list_scheduler(self): + self.stubs.Set(api.API, '_call_scheduler', zone_get_all_scheduler) + req = webob.Request.blank('/v1.0/zones') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['zones']), 2) + + def test_get_zone_list_db(self): + self.stubs.Set(api.API, '_call_scheduler', + zone_get_all_scheduler_empty) + self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db) + req = webob.Request.blank('/v1.0/zones') + req.headers["Content-Type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(len(res_dict['zones']), 2) + + def test_get_zone_by_id(self): + req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://example.com') + self.assertFalse('password' in res_dict['zone']) + + def test_zone_delete(self): + req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + + def test_zone_create(self): + body = dict(zone=dict(api_url='http://example.com', username='fred', + password='fubar')) + req = webob.Request.blank('/v1.0/zones') + req.headers["Content-Type"] =
"application/json" + req.method = 'POST' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://example.com') + self.assertFalse('username' in res_dict['zone']) + + def test_zone_update(self): + body = dict(zone=dict(username='zeb', password='sneaky')) + req = webob.Request.blank('/v1.0/zones/1') + req.headers["Content-Type"] = "application/json" + req.method = 'PUT' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://example.com') + self.assertFalse('username' in res_dict['zone']) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 44e2d615c..b1a849cf9 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -21,15 +21,17 @@ Test WSGI basics and provide some helper functions for other WSGI tests. """ -import unittest +import json +from nova import test import routes import webob +from nova import exception from nova import wsgi -class Test(unittest.TestCase): +class Test(test.TestCase): def test_debug(self): @@ -66,63 +68,164 @@ class Test(unittest.TestCase): result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") - def test_controller(self): - class Controller(wsgi.Controller): - """Test controller to call from router.""" - test = self +class ControllerTest(test.TestCase): - def show(self, req, id): # pylint: disable-msg=W0622,C0103 - """Default action called for requests with an ID.""" - self.test.assertEqual(req.path_info, '/tests/123') - self.test.assertEqual(id, '123') - return id - - class Router(wsgi.Router): - """Test router.""" - - def __init__(self): - mapper = routes.Mapper() - mapper.resource("test", "tests", controller=Controller()) - super(Router, self).__init__(mapper) + class TestRouter(wsgi.Router): - result = webob.Request.blank('/tests/123').get_response(Router()) - self.assertEqual(result.body, "123") - result = webob.Request.blank('/test/123').get_response(Router()) - self.assertNotEqual(result.body, "123") + class TestController(wsgi.Controller): + _serialization_metadata = { + 'application/xml': { + "attributes": { + "test": ["id"]}}} -class SerializerTest(unittest.TestCase): - - def match(self, url, accept, expect): + def show(self, req, id): # pylint: disable-msg=W0622,C0103 + return {"test": {"id": id}} + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("test", "tests", controller=self.TestController()) + wsgi.Router.__init__(self, mapper) + + def test_show(self): + request = wsgi.Request.blank('/tests/123') + result = request.get_response(self.TestRouter()) + self.assertEqual(json.loads(result.body), {"test": {"id": "123"}}) + + def test_response_content_type_from_accept_xml(self): + request = webob.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/xml") + + def test_response_content_type_from_accept_json(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], 
"application/json") + + def test_response_content_type_from_query_extension_xml(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/xml") + + def test_response_content_type_from_query_extension_json(self): + request = wsgi.Request.blank('/tests/123.json') + result = request.get_response(self.TestRouter()) + self.assertEqual(result.headers["Content-Type"], "application/json") + + def test_response_content_type_default_when_unsupported(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.get_response(self.TestRouter()) + self.assertEqual(result.status_int, 200) + self.assertEqual(result.headers["Content-Type"], "application/json") + + +class RequestTest(test.TestCase): + + def test_request_content_type_missing(self): + request = wsgi.Request.blank('/tests/123') + request.body = "<body />" + self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + + def test_request_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "text/html" + request.body = "asdf<br />" + self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + + def test_content_type_from_accept_xml(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = \ + "application/json; q=0.3, application/xml; q=0.9" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + +class SerializerTest(test.TestCase): + + def test_xml(self): input_dict = dict(servers=dict(a=(2, 3))) expected_xml = '<servers><a>(2,3)</a></servers>' + serializer = wsgi.Serializer() + result = serializer.serialize(input_dict, "application/xml") + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_xml) + + def test_json(self): + input_dict = 
dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}' - req = webob.Request.blank(url, headers=dict(Accept=accept)) - result = wsgi.Serializer(req.environ).to_content_type(input_dict) + serializer = wsgi.Serializer() + result = serializer.serialize(input_dict, "application/json") result = result.replace('\n', '').replace(' ', '') - if expect == 'xml': - self.assertEqual(result, expected_xml) - elif expect == 'json': - self.assertEqual(result, expected_json) - else: - raise "Bad expect value" - - def test_basic(self): - self.match('/servers/4.json', None, expect='json') - self.match('/servers/4', 'application/json', expect='json') - self.match('/servers/4', 'application/xml', expect='xml') - self.match('/servers/4.xml', None, expect='xml') - - def test_defaults_to_json(self): - self.match('/servers/4', None, expect='json') - self.match('/servers/4', 'text/html', expect='json') - - def test_suffix_takes_precedence_over_accept_header(self): - self.match('/servers/4.xml', 'application/json', expect='xml') - self.match('/servers/4.xml.', 'application/json', expect='json') - - def test_deserialize(self): + self.assertEqual(result, expected_json) + + def test_unsupported_content_type(self): + serializer = wsgi.Serializer() + self.assertRaises(exception.InvalidContentType, serializer.serialize, + {}, "text/null") + + def test_deserialize_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + metadata = {} + serializer = wsgi.Serializer(metadata) + self.assertEqual(serializer.deserialize(data, "application/json"), + as_dict) + + def test_deserialize_xml(self): xml = """ <a a1="1" a2="2"> <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs> @@ -137,11 +240,13 @@ class SerializerTest(unittest.TestCase): 'd': {'e': '1'}, 'f': '1'}) metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})} - serializer = wsgi.Serializer({}, metadata) - self.assertEqual(serializer.deserialize(xml), as_dict) + serializer = wsgi.Serializer(metadata) + self.assertEqual(serializer.deserialize(xml, "application/xml"), + as_dict) def test_deserialize_empty_xml(self): xml = """<a></a>""" as_dict = {"a": {}} - serializer = wsgi.Serializer({}) - self.assertEqual(serializer.deserialize(xml), as_dict) + serializer = wsgi.Serializer() + self.assertEqual(serializer.deserialize(xml, "application/xml"), + as_dict) diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 05bdd172e..d760dc456 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -20,13 +20,22 @@ import time from nova import db +from nova import test from nova import utils -from nova.compute import instance_types def stub_out_db_instance_api(stubs): """ Stubs out the db API for creating Instances """ + INSTANCE_TYPES = { + 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), + 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), + 'm1.medium': + dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), + 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), + 'm1.xlarge': + dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} + class FakeModel(object): """ Stubs out for model """ def __init__(self, values): @@ -41,10 +50,16 @@ def stub_out_db_instance_api(stubs): else: raise NotImplementedError() + def fake_instance_type_get_all(context, inactive=0): + return 
INSTANCE_TYPES + + def fake_instance_type_get_by_name(context, name): + return INSTANCE_TYPES[name] + def fake_instance_create(values): """ Stubs out the db.instance_create method """ - type_data = instance_types.INSTANCE_TYPES[values['instance_type']] + type_data = INSTANCE_TYPES[values['instance_type']] base_options = { 'name': values['name'], @@ -73,3 +88,5 @@ def stub_out_db_instance_api(stubs): stubs.Set(db, 'instance_create', fake_instance_create) stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance) + stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all) + stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 1097488ec..5d7ca98b5 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -29,9 +29,10 @@ FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.network_size = 16 -FLAGS.num_networks = 5 +FLAGS.network_size = 8 +FLAGS.num_networks = 2 FLAGS.fake_network = True +FLAGS.image_service = 'nova.image.local.LocalImageService' flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') @@ -39,5 +40,5 @@ FLAGS.num_shelves = 2 FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True -FLAGS.sql_connection = 'sqlite:///nova.sqlite' +FLAGS.sqlite_db = "tests.sqlite" FLAGS.use_ipv6 = True diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index f182b857a..5872552ec 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -26,12 +26,45 @@ def stubout_glance_client(stubs, cls): class FakeGlance(object): + IMAGE_MACHINE = 1 + IMAGE_KERNEL = 2 + IMAGE_RAMDISK = 3 + IMAGE_RAW = 4 + IMAGE_VHD = 5 + + IMAGE_FIXTURES = { + IMAGE_MACHINE: { + 'image_meta': {'name': 'fakemachine', 'size': 0, + 'disk_format': 'ami', + 'container_format': 'ami'}, + 'image_data': StringIO.StringIO('')}, + IMAGE_KERNEL: { + 'image_meta': {'name': 'fakekernel', 'size': 0, + 'disk_format': 'aki', + 'container_format': 'aki'}, + 'image_data': StringIO.StringIO('')}, + IMAGE_RAMDISK: { + 'image_meta': {'name': 'fakeramdisk', 'size': 0, + 'disk_format': 'ari', + 'container_format': 'ari'}, + 'image_data': StringIO.StringIO('')}, + IMAGE_RAW: { + 'image_meta': {'name': 'fakeraw', 'size': 0, + 'disk_format': 'raw', + 'container_format': 'bare'}, + 'image_data': StringIO.StringIO('')}, + IMAGE_VHD: { + 'image_meta': {'name': 'fakevhd', 'size': 0, + 'disk_format': 'vhd', + 'container_format': 'ovf'}, + 'image_data': StringIO.StringIO('')}} + def __init__(self, host, port=None, use_ssl=False): pass - def get_image(self, image): - meta = { - 'size': 0, - } - image_file = StringIO.StringIO('') - return meta, image_file + def get_image_meta(self, image_id): + return self.IMAGE_FIXTURES[image_id]['image_meta'] + + def get_image(self, image_id): + image = self.IMAGE_FIXTURES[image_id] + return image['image_meta'], image['image_data'] diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index da86e6e11..5a1be08eb 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -311,4 +311,5 @@ class S3APITestCase(test.TestCase): self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') stop_listening 
= defer.maybeDeferred(self.listening_port.stopListening) + super(S3APITestCase, self).tearDown() return defer.DeferredList([stop_listening]) diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 2569e262b..d5c54a1c3 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -20,6 +20,7 @@ import boto from boto.ec2 import regioninfo +import datetime import httplib import random import StringIO @@ -127,6 +128,28 @@ class ApiEc2TestCase(test.TestCase): self.ec2.new_http_connection(host, is_secure).AndReturn(self.http) return self.http + def test_return_valid_isoformat(self): + """ + Ensure that the ec2 api returns datetime in xs:dateTime + (which apparently isn't datetime.isoformat()) + NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297 + """ + conv = apirequest._database_to_isoformat + # sqlite database representation with microseconds + time_to_convert = datetime.datetime.strptime( + "2011-02-21 20:14:10.634276", + "%Y-%m-%d %H:%M:%S.%f") + self.assertEqual( + conv(time_to_convert), + '2011-02-21T20:14:10Z') + # mysql database representation (no microseconds) + time_to_convert = datetime.datetime.strptime( + "2011-02-21 19:56:18", + "%Y-%m-%d %H:%M:%S") + self.assertEqual( + conv(time_to_convert), + '2011-02-21T19:56:18Z') + def test_xmlns_version_matches_request_version(self): self.expect_http(api_version='2010-10-30') self.mox.ReplayAll() @@ -248,16 +271,14 @@ class ApiEc2TestCase(test.TestCase): self.mox.ReplayAll() rv = self.ec2.get_all_security_groups() - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that. - for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') + + group = [grp for grp in rv if grp.name == security_group_name][0] + + self.assertEquals(len(group.rules), 1) + self.assertEquals(int(group.rules[0].from_port), 80) + self.assertEquals(int(group.rules[0].to_port), 81) + self.assertEquals(len(group.rules[0].grants), 1) + self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') self.expect_http() self.mox.ReplayAll() @@ -314,16 +335,13 @@ class ApiEc2TestCase(test.TestCase): self.mox.ReplayAll() rv = self.ec2.get_all_security_groups() - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that.
- for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '::/0') + + group = [grp for grp in rv if grp.name == security_group_name][0] + self.assertEquals(len(group.rules), 1) + self.assertEquals(int(group.rules[0].from_port), 80) + self.assertEquals(int(group.rules[0].to_port), 81) + self.assertEquals(len(group.rules[0].grants), 1) + self.assertEquals(str(group.rules[0].grants[0]), '::/0') self.expect_http() self.mox.ReplayAll() diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 35ffffb67..2a7817032 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -327,15 +327,6 @@ class AuthManagerTestCase(object): class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' - def __init__(self, *args, **kwargs): - AuthManagerTestCase.__init__(self) - test.TestCase.__init__(self, *args, **kwargs) - import nova.auth.fakeldap as fakeldap - if FLAGS.flush_db: - LOG.info("Flushing datastore") - r = fakeldap.Store.instance() - r.flushdb() - class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): auth_driver = 'nova.auth.dbdriver.DbDriver' diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 445cc6e8b..cf8ee7eff 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -38,6 +38,8 @@ from nova import test from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud +from nova.api.ec2 import ec2utils +from nova.image import local from nova.objectstore import image @@ -65,18 +67,27 @@ class CloudTestCase(test.TestCase): self.cloud = cloud.CloudController() # set up services - self.compute = service.Service.create(binary='nova-compute') - self.compute.start() - self.network = service.Service.create(binary='nova-network') - self.network.start() + self.compute = self.start_service('compute') + self.scheduler = self.start_service('scheduler') + self.network = self.start_service('network') self.manager = manager.AuthManager() self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, project=self.project) + host = self.network.get_network_host(self.context.elevated()) + + def fake_show(meh, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(local.LocalImageService, 'show', fake_show) + self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) def tearDown(self): + network_ref = db.project_get_network(self.context, + self.project.id) + db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) self.compute.kill() @@ -102,7 +113,7 @@ class CloudTestCase(test.TestCase): address = "10.10.10.10" db.floating_ip_create(self.context, {'address': address, - 'host': FLAGS.host}) + 'host': self.network.host}) self.cloud.allocate_address(self.context) self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, @@ -115,11 +126,11 @@ class CloudTestCase(test.TestCase): address = "10.10.10.10" db.floating_ip_create(self.context, {'address': address, - 'host': FLAGS.host}) + 'host': self.network.host})
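+ # the floating ip must exist in the db, bound to the network host, + # before the cloud layer can allocate and associate it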
self.cloud.allocate_address(self.context) - inst = db.instance_create(self.context, {'host': FLAGS.host}) + inst = db.instance_create(self.context, {'host': self.compute.host}) fixed = self.network.allocate_fixed_ip(self.context, inst['id']) - ec2_id = cloud.id_to_ec2_id(inst['id']) + ec2_id = ec2utils.id_to_ec2_id(inst['id']) self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) @@ -133,18 +144,34 @@ class CloudTestCase(test.TestCase): db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) + def test_describe_security_groups(self): + """Makes sure describe_security_groups works and filters results.""" + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + result = self.cloud.describe_security_groups(self.context) + # NOTE(vish): should have the default group as well + self.assertEqual(len(result['securityGroupInfo']), 2) + result = self.cloud.describe_security_groups(self.context, + group_name=[sec['name']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + sec['name']) + db.security_group_destroy(self.context, sec['id']) + def test_describe_volumes(self): """Makes sure describe_volumes works and filters results.""" vol1 = db.volume_create(self.context, {}) vol2 = db.volume_create(self.context, {}) result = self.cloud.describe_volumes(self.context) self.assertEqual(len(result['volumeSet']), 2) - volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x') + volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x') result = self.cloud.describe_volumes(self.context, volume_id=[volume_id]) self.assertEqual(len(result['volumeSet']), 1) self.assertEqual( - cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']), + ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']), vol2['id']) db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) @@ -169,8 +196,10 @@ class CloudTestCase(test.TestCase): def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', + 'image_id': 1, 'host': 'host1'}) inst2 = db.instance_create(self.context, {'reservation_id': 'a', + 'image_id': 1, 'host': 'host2'}) comp1 = db.service_create(self.context, {'host': 'host1', 'availability_zone': 'zone1', @@ -181,7 +210,7 @@ class CloudTestCase(test.TestCase): result = self.cloud.describe_instances(self.context) result = result['reservationSet'][0] self.assertEqual(len(result['instancesSet']), 2) - instance_id = cloud.id_to_ec2_id(inst2['id']) + instance_id = ec2utils.id_to_ec2_id(inst2['id']) result = self.cloud.describe_instances(self.context, instance_id=[instance_id]) result = result['reservationSet'][0] @@ -196,34 +225,37 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, comp2['id']) def test_console_output(self): - image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type max_count = 1 - kwargs = {'image_id': image_id, + kwargs = {'image_id': 'ami-1', 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) + greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, - instance_id=[instance_id]) + instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') # TODO(soren): We need this until we can stop 
polling in the rpc code # for unit tests. greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) + greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_id': image_id} - rv = yield self.cloud.run_instances(self.context, **kwargs) + kwargs = {'image_id': 'ami-1'} + rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - output = yield self.cloud.get_console_output(context=self.context, - instance_id=[instance_id]) - self.assertEquals(b64decode(output['output']), - 'http://fakeajaxconsole.com/?token=FAKETOKEN') + greenthread.sleep(0.3) + output = self.cloud.get_ajax_console(context=self.context, + instance_id=[instance_id]) + self.assertEquals(output['url'], + '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. greenthread.sleep(0.3) - rv = yield self.cloud.terminate_instances(self.context, [instance_id]) + rv = self.cloud.terminate_instances(self.context, [instance_id]) + greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') @@ -243,7 +275,7 @@ class CloudTestCase(test.TestCase): self._create_key('test1') self._create_key('test2') result = self.cloud.describe_key_pairs(self.context) - keys = result["keypairsSet"] + keys = result["keySet"] self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) @@ -286,70 +318,6 @@ class CloudTestCase(test.TestCase): LOG.debug(_("Terminating instance %s"), instance_id) rv = self.compute.terminate_instance(instance_id) - def test_describe_instances(self): - """Makes sure describe_instances works.""" - instance1 = db.instance_create(self.context, {'host': 'host2'}) - comp1 = db.service_create(self.context, {'host': 'host2', - 'availability_zone': 'zone1', - 'topic': "compute"}) - result = self.cloud.describe_instances(self.context) - self.assertEqual(result['reservationSet'][0] - ['instancesSet'][0] - ['placement']['availabilityZone'], 'zone1') - db.instance_destroy(self.context, instance1['id']) - db.service_destroy(self.context, comp1['id']) - - def test_instance_update_state(self): - # TODO(termie): what is this code even testing? 
- def instance(num): - return { - 'reservation_id': 'r-1', - 'instance_id': 'i-%s' % num, - 'image_id': 'ami-%s' % num, - 'private_dns_name': '10.0.0.%s' % num, - 'dns_name': '10.0.0%s' % num, - 'ami_launch_index': str(num), - 'instance_type': 'fake', - 'availability_zone': 'fake', - 'key_name': None, - 'kernel_id': 'fake', - 'ramdisk_id': 'fake', - 'groups': ['default'], - 'product_codes': None, - 'state': 0x01, - 'user_data': ''} - rv = self.cloud._format_describe_instances(self.context) - logging.error(str(rv)) - self.assertEqual(len(rv['reservationSet']), 0) - - # simulate launch of 5 instances - # self.cloud.instances['pending'] = {} - #for i in xrange(5): - # inst = instance(i) - # self.cloud.instances['pending'][inst['instance_id']] = inst - - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - # report 4 nodes each having 1 of the instances - #for i in xrange(4): - # self.cloud.update_state('instances', - # {('node-%s' % i): {('i-%s' % i): - # instance(i)}}) - - # one instance should be pending still - #self.assert_(len(self.cloud.instances['pending'].keys()) == 1) - - # check that the reservations collapse - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - - # check that we can get metadata for each instance - #for i in xrange(4): - # data = self.cloud.get_metadata(instance(i)['private_dns_name']) - # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i) - @staticmethod def _fake_set_image_description(ctxt, image_id, description): from nova.objectstore import handler @@ -387,7 +355,7 @@ class CloudTestCase(test.TestCase): def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) - ec2_id = cloud.id_to_ec2_id(inst['id']) + ec2_id = ec2utils.id_to_ec2_id(inst['id']) self.cloud.update_instance(self.context, ec2_id, display_name='c00l 1m4g3') inst = db.instance_get(self.context, inst['id']) @@ -405,7 +373,7 @@ class CloudTestCase(test.TestCase): def test_update_of_volume_display_fields(self): vol = db.volume_create(self.context, {}) self.cloud.update_volume(self.context, - cloud.id_to_ec2_id(vol['id'], 'vol-%08x'), + ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'), display_name='c00l v0lum3') vol = db.volume_get(self.context, vol['id']) self.assertEqual('c00l v0lum3', vol['display_name']) @@ -414,7 +382,7 @@ class CloudTestCase(test.TestCase): def test_update_of_volume_wont_update_private_fields(self): vol = db.volume_create(self.context, {}) self.cloud.update_volume(self.context, - cloud.id_to_ec2_id(vol['id'], 'vol-%08x'), + ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'), mountpoint='/not/here') vol = db.volume_get(self.context, vol['id']) self.assertEqual(None, vol['mountpoint']) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2aa0690e7..643b2e93a 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -30,7 +30,8 @@ from nova import log as logging from nova import test from nova import utils from nova.auth import manager - +from nova.compute import instance_types +from nova.image import local LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS @@ -51,15 +52,20 @@ class ComputeTestCase(test.TestCase): self.project = self.manager.create_project('fake', 'fake', 'fake') self.context = context.RequestContext('fake', 'fake', False) + def fake_show(meh, context, id): + 
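# canned image metadata: a kernel_id and ramdisk_id are all the + # compute manager looks up before spawning, so no image service runs +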
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(local.LocalImageService, 'show', fake_show) + def tearDown(self): self.manager.delete_user(self.user) self.manager.delete_project(self.project) super(ComputeTestCase, self).tearDown() - def _create_instance(self): + def _create_instance(self, params={}): """Create a test instance""" inst = {} - inst['image_id'] = 'ami-test' + inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' inst['user_id'] = self.user.id @@ -67,6 +73,7 @@ class ComputeTestCase(test.TestCase): inst['instance_type'] = 'm1.tiny' inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 + inst.update(params) return db.instance_create(self.context, inst)['id'] def _create_group(self): @@ -202,6 +209,14 @@ class ComputeTestCase(test.TestCase): self.compute.set_admin_password(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) + def test_inject_file(self): + """Ensure we can write a file to an instance""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.inject_file(self.context, instance_id, "/tmp/test", + "File Contents") + self.compute.terminate_instance(self.context, instance_id) + def test_snapshot(self): """Ensure instance can be snapshotted""" instance_id = self._create_instance() @@ -258,3 +273,31 @@ class ComputeTestCase(test.TestCase): self.assertEqual(ret_val, None) self.compute.terminate_instance(self.context, instance_id) + + def test_resize_instance(self): + """Ensure instance can be migrated/resized""" + instance_id = self._create_instance() + context = self.context.elevated() + self.compute.run_instance(self.context, instance_id) + db.instance_update(self.context, instance_id, {'host': 'foo'}) + self.compute.prep_resize(context, instance_id) + migration_ref = db.migration_get_by_instance_and_status(context, + instance_id, 'pre-migrating') + self.compute.resize_instance(context, instance_id, + migration_ref['id']) + self.compute.terminate_instance(context, instance_id) + + def test_get_by_flavor_id(self): + type = instance_types.get_by_flavor_id(1) + self.assertEqual(type, 'm1.tiny') + # flavor ids may arrive as strings from the API layer + type = instance_types.get_by_flavor_id("1") + self.assertEqual(type, 'm1.tiny') + + def test_resize_same_source_fails(self): + """Ensure instance fails to migrate when source and destination are + the same host""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.assertRaises(exception.Error, self.compute.prep_resize, + self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 85bf94458..d47c70d88 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -21,7 +21,6 @@ Tests For Console proxy.
""" import datetime -import logging from nova import context from nova import db @@ -38,7 +37,6 @@ FLAGS = flags.FLAGS class ConsoleTestCase(test.TestCase): """Test case for console proxy""" def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(ConsoleTestCase, self).setUp() self.flags(console_driver='nova.console.fake.FakeConsoleProxy', stub_compute=True) @@ -59,7 +57,7 @@ class ConsoleTestCase(test.TestCase): inst = {} #inst['host'] = self.host #inst['name'] = 'instance-1234' - inst['image_id'] = 'ami-test' + inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' inst['user_id'] = self.user.id diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 8a74b2296..80e4d2e1f 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -19,7 +19,6 @@ """Tests for Direct API.""" import json -import logging import webob @@ -53,12 +52,14 @@ class DirectTestCase(test.TestCase): def tearDown(self): direct.ROUTES = {} + super(DirectTestCase, self).tearDown() def test_delegated_auth(self): req = webob.Request.blank('/fake/context') req.headers['X-OpenStack-User'] = 'user1' req.headers['X-OpenStack-Project'] = 'proj1' resp = req.get_response(self.auth_router) + self.assertEqual(resp.status_int, 200) data = json.loads(resp.body) self.assertEqual(data['user'], 'user1') self.assertEqual(data['project'], 'proj1') @@ -69,6 +70,7 @@ class DirectTestCase(test.TestCase): req.method = 'POST' req.body = 'json=%s' % json.dumps({'data': 'foo'}) resp = req.get_response(self.router) + self.assertEqual(resp.status_int, 200) resp_parsed = json.loads(resp.body) self.assertEqual(resp_parsed['data'], 'foo') @@ -78,6 +80,7 @@ class DirectTestCase(test.TestCase): req.method = 'POST' req.body = 'data=foo' resp = req.get_response(self.router) + self.assertEqual(resp.status_int, 200) resp_parsed = json.loads(resp.body) self.assertEqual(resp_parsed['data'], 'foo') @@ -90,8 +93,7 @@ class DirectTestCase(test.TestCase): class DirectCloudTestCase(test_cloud.CloudTestCase): def setUp(self): super(DirectCloudTestCase, self).setUp() - compute_handle = compute.API(image_service=self.cloud.image_service, - network_api=self.cloud.network_api, + compute_handle = compute.API(network_api=self.cloud.network_api, volume_api=self.cloud.volume_api) direct.register_service('compute', compute_handle) self.router = direct.JsonParamsMiddleware(direct.Router()) diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py new file mode 100644 index 000000000..edc538879 --- /dev/null +++ b/nova/tests/test_instance_types.py @@ -0,0 +1,86 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Ken Pepple +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for instance types code +""" +import time + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import utils +from nova.compute import instance_types +from nova.db.sqlalchemy.session import get_session +from nova.db.sqlalchemy import models + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.compute') + + +class InstanceTypeTestCase(test.TestCase): + """Test cases for instance type code""" + def setUp(self): + super(InstanceTypeTestCase, self).setUp() + session = get_session() + max_flavorid = session.query(models.InstanceTypes).\ + order_by("flavorid desc").\ + first() + self.flavorid = max_flavorid["flavorid"] + 1 + self.name = str(int(time.time())) + + def test_instance_type_create_then_delete(self): + """Ensure instance types can be created""" + starting_inst_list = instance_types.get_all_types() + instance_types.create(self.name, 256, 1, 120, self.flavorid) + new = instance_types.get_all_types() + self.assertNotEqual(len(starting_inst_list), + len(new), + 'instance type was not created') + instance_types.destroy(self.name) + self.assertEqual(1, + instance_types.get_instance_type(self.name)["deleted"]) + self.assertEqual(starting_inst_list, instance_types.get_all_types()) + instance_types.purge(self.name) + self.assertEqual(len(starting_inst_list), + len(instance_types.get_all_types()), + 'instance type not purged') + + def test_get_all_instance_types(self): + """Ensures that all instance types can be retrieved""" + session = get_session() + total_instance_types = session.query(models.InstanceTypes).\ + count() + inst_types = instance_types.get_all_types() + self.assertEqual(total_instance_types, len(inst_types)) + + def test_invalid_create_args_should_fail(self): + """Ensures that instance type creation fails with invalid args""" + self.assertRaises( + exception.InvalidInputException, + instance_types.create, self.name, 0, 1, 120, self.flavorid) + self.assertRaises( + exception.InvalidInputException, + instance_types.create, self.name, 256, -1, 120, self.flavorid) + self.assertRaises( + exception.InvalidInputException, + instance_types.create, self.name, 256, 1, "aa", self.flavorid) + + def test_non_existant_inst_type_shouldnt_delete(self): + """Ensures that instance type creation fails with invalid args""" + self.assertRaises(exception.ApiError, + instance_types.destroy, "sfsfsdfdfs") diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py index 6992773f5..393d71038 100644 --- a/nova/tests/test_localization.py +++ b/nova/tests/test_localization.py @@ -15,7 +15,6 @@ # under the License. 
import glob -import logging import os import re import sys diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py index 868a5ead3..122351ff6 100644 --- a/nova/tests/test_log.py +++ b/nova/tests/test_log.py @@ -1,9 +1,12 @@ import cStringIO from nova import context +from nova import flags from nova import log from nova import test +FLAGS = flags.FLAGS + def _fake_context(): return context.RequestContext(1, 1) @@ -14,15 +17,11 @@ class RootLoggerTestCase(test.TestCase): super(RootLoggerTestCase, self).setUp() self.log = log.logging.root - def tearDown(self): - super(RootLoggerTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} - def test_is_nova_instance(self): self.assert_(isinstance(self.log, log.NovaLogger)) - def test_name_is_nova_root(self): - self.assertEqual("nova.root", self.log.name) + def test_name_is_nova(self): + self.assertEqual("nova", self.log.name) def test_handlers_have_nova_formatter(self): formatters = [] @@ -45,6 +44,38 @@ class RootLoggerTestCase(test.TestCase): log.audit("foo", context=_fake_context()) self.assert_(True) # didn't raise exception + def test_will_be_verbose_if_verbose_flag_set(self): + self.flags(verbose=True) + log.reset() + self.assertEqual(log.DEBUG, self.log.level) + + def test_will_not_be_verbose_if_verbose_flag_not_set(self): + self.flags(verbose=False) + log.reset() + self.assertEqual(log.INFO, self.log.level) + + +class LogHandlerTestCase(test.TestCase): + def test_log_path_logdir(self): + self.flags(logdir='/some/path', logfile=None) + self.assertEquals(log._get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + def test_log_path_logfile(self): + self.flags(logfile='/some/path/foo-bar.log') + self.assertEquals(log._get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + def test_log_path_none(self): + self.flags(logdir=None, logfile=None) + self.assertTrue(log._get_log_file_path(binary='foo-bar') is None) + + def test_log_path_logfile_overrides_logdir(self): + self.flags(logdir='/some/other/path', + logfile='/some/path/foo-bar.log') + self.assertEquals(log._get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + class NovaFormatterTestCase(test.TestCase): def setUp(self): @@ -55,13 +86,15 @@ class NovaFormatterTestCase(test.TestCase): logging_debug_format_suffix="--DBG") self.log = log.logging.root self.stream = cStringIO.StringIO() - handler = log.StreamHandler(self.stream) - self.log.addHandler(handler) + self.handler = log.StreamHandler(self.stream) + self.log.addHandler(self.handler) + self.level = self.log.level self.log.setLevel(log.DEBUG) def tearDown(self): + self.log.setLevel(self.level) + self.log.removeHandler(self.handler) super(NovaFormatterTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} def test_uncontextualized_log(self): self.log.info("foo") @@ -81,30 +114,15 @@ class NovaFormatterTestCase(test.TestCase): class NovaLoggerTestCase(test.TestCase): def setUp(self): super(NovaLoggerTestCase, self).setUp() - self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False) + levels = FLAGS.default_log_levels + levels.append("nova-test=AUDIT") + self.flags(default_log_levels=levels, + verbose=True) self.log = log.getLogger('nova-test') - def tearDown(self): - super(NovaLoggerTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} - def test_has_level_from_flags(self): self.assertEqual(log.AUDIT, self.log.level) def test_child_log_has_level_of_parent_flag(self): l = log.getLogger('nova-test.foo') self.assertEqual(log.AUDIT, l.level) - - -class 
VerboseLoggerTestCase(test.TestCase): - def setUp(self): - super(VerboseLoggerTestCase, self).setUp() - self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True) - self.log = log.getLogger('nova.test') - - def tearDown(self): - super(VerboseLoggerTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} - - def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self): - self.assertEqual(log.DEBUG, self.log.level) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index 33c1777d5..a658e4978 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -14,10 +14,12 @@ # License for the specific language governing permissions and limitations # under the License. +import errno import os +import select from nova import test -from nova.utils import parse_mailmap, str_dict_replace +from nova.utils import parse_mailmap, str_dict_replace, synchronized class ProjectTestCase(test.TestCase): @@ -46,6 +48,8 @@ class ProjectTestCase(test.TestCase): missing = set() for contributor in contributors: + if contributor == 'nova-core': + continue if not contributor in authors_file: missing.add(contributor) @@ -53,3 +57,47 @@ class ProjectTestCase(test.TestCase): '%r not listed in Authors' % missing) finally: tree.unlock() + + +class LockTestCase(test.TestCase): + def test_synchronized_wrapped_function_metadata(self): + @synchronized('whatever') + def foo(): + """Bar""" + pass + self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring " + "got lost") + self.assertEquals(foo.__name__, 'foo', "Wrapped function's name " + "got mangled") + + def test_synchronized(self): + rpipe1, wpipe1 = os.pipe() + rpipe2, wpipe2 = os.pipe() + + @synchronized('testlock') + def f(rpipe, wpipe): + try: + os.write(wpipe, "foo") + except OSError, e: + self.assertEquals(e.errno, errno.EPIPE) + return + + rfds, _, __ = select.select([rpipe], [], [], 1) + self.assertEquals(len(rfds), 0, "The other process, which was" + " supposed to be locked, " + "wrote on its end of the " + "pipe") + os.close(rpipe) + + pid = os.fork() + if pid > 0: + os.close(wpipe1) + os.close(rpipe2) + + f(rpipe1, wpipe2) + else: + os.close(rpipe1) + os.close(wpipe2) + + f(rpipe2, wpipe1) + os._exit(0) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 00f9323f3..53e35ce7e 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -29,11 +29,153 @@ from nova import log as logging from nova import test from nova import utils from nova.auth import manager +from nova.network import linux_net FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.network') +class IptablesManagerTestCase(test.TestCase): + sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011', + '*filter', + ':INPUT ACCEPT [2223527:305688874]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [2172501:140856656]', + ':nova-compute-FORWARD - [0:0]', + ':nova-compute-INPUT - [0:0]', + ':nova-compute-local - [0:0]', + ':nova-compute-OUTPUT - [0:0]', + ':nova-filter-top - [0:0]', + '-A FORWARD -j nova-filter-top ', + '-A OUTPUT -j nova-filter-top ', + '-A nova-filter-top -j nova-compute-local ', + '-A INPUT -j nova-compute-INPUT ', + '-A OUTPUT -j nova-compute-OUTPUT ', + '-A FORWARD -j nova-compute-FORWARD ', + '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j 
ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with ' + 'icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with ' + 'icmp-port-unreachable ', + 'COMMIT', + '# Completed on Fri Feb 18 15:17:05 2011'] + + sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011', + '*nat', + ':PREROUTING ACCEPT [3936:762355]', + ':INPUT ACCEPT [2447:225266]', + ':OUTPUT ACCEPT [63491:4191863]', + ':POSTROUTING ACCEPT [63112:4108641]', + ':nova-compute-OUTPUT - [0:0]', + ':nova-compute-floating-ip-snat - [0:0]', + ':nova-compute-SNATTING - [0:0]', + ':nova-compute-PREROUTING - [0:0]', + ':nova-compute-POSTROUTING - [0:0]', + ':nova-postrouting-bottom - [0:0]', + '-A PREROUTING -j nova-compute-PREROUTING ', + '-A OUTPUT -j nova-compute-OUTPUT ', + '-A POSTROUTING -j nova-compute-POSTROUTING ', + '-A POSTROUTING -j nova-postrouting-bottom ', + '-A nova-postrouting-bottom -j nova-compute-SNATTING ', + '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ', + 'COMMIT', + '# Completed on Fri Feb 18 15:17:05 2011'] + + def setUp(self): + super(IptablesManagerTestCase, self).setUp() + self.manager = linux_net.IptablesManager() + + def test_filter_rules_are_wrapped(self): + current_lines = self.sample_filter + + table = self.manager.ipv4['filter'] + table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') + new_lines = self.manager._modify_rules(current_lines, table) + self.assertTrue('-A run_tests.py-FORWARD ' + '-s 1.2.3.4/5 -j DROP' in new_lines) + + table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') + new_lines = self.manager._modify_rules(current_lines, table) + self.assertTrue('-A run_tests.py-FORWARD ' + '-s 1.2.3.4/5 -j DROP' not in new_lines) + + def test_nat_rules(self): + current_lines = self.sample_nat + new_lines = self.manager._modify_rules(current_lines, + self.manager.ipv4['nat']) + + for line in [':nova-compute-OUTPUT - [0:0]', + ':nova-compute-floating-ip-snat - [0:0]', + ':nova-compute-SNATTING - [0:0]', + ':nova-compute-PREROUTING - [0:0]', + ':nova-compute-POSTROUTING - [0:0]']: + self.assertTrue(line in new_lines, "One of nova-compute's chains " + "went missing.") + + seen_lines = set() + for line in new_lines: + line = line.strip() + self.assertTrue(line not in seen_lines, + "Duplicate line: %s" % line) + seen_lines.add(line) + + last_postrouting_line = '' + + for line in new_lines: + if line.startswith('-A POSTROUTING'): + last_postrouting_line = line + + self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line, + "Last POSTROUTING rule does not jump to " + "nova-postrouting-bottom: %s" % last_postrouting_line) + + for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']: + self.assertTrue('-A %s -j run_tests.py-%s' \ + % (chain, chain) in new_lines, + "Built-in chain %s not wrapped" % (chain,)) + + def test_filter_rules(self): + current_lines = self.sample_filter + new_lines = self.manager._modify_rules(current_lines, + self.manager.ipv4['filter']) + + for line in [':nova-compute-FORWARD - [0:0]', + ':nova-compute-INPUT - [0:0]', + ':nova-compute-local - [0:0]', + ':nova-compute-OUTPUT - [0:0]']: + self.assertTrue(line in new_lines, "One of nova-compute's chains" + " went missing.") + + seen_lines = set() + for line in new_lines: + line = line.strip() + self.assertTrue(line not in seen_lines, + "Duplicate line: %s" % line) + seen_lines.add(line) + + for chain in ['FORWARD', 'OUTPUT']: + for line in new_lines: + if line.startswith('-A %s' % chain): + self.assertTrue('-j nova-filter-top' in line,
"First %s rule does not " + "jump to nova-filter-top" % chain) + break + + self.assertTrue('-A nova-filter-top ' + '-j run_tests.py-local' in new_lines, + "nova-filter-top does not jump to wrapped local chain") + + for chain in ['INPUT', 'OUTPUT', 'FORWARD']: + self.assertTrue('-A %s -j run_tests.py-%s' \ + % (chain, chain) in new_lines, + "Built-in chain %s not wrapped" % (chain,)) + + class NetworkTestCase(test.TestCase): """Test cases for network code""" def setUp(self): @@ -42,15 +184,13 @@ class NetworkTestCase(test.TestCase): # flags in the corresponding section in nova-dhcpbridge self.flags(connection_type='fake', fake_call=True, - fake_network=True, - network_size=16, - num_networks=5) + fake_network=True) self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.network = utils.import_object(FLAGS.network_manager) self.context = context.RequestContext(project=None, user=self.user) - for i in range(5): + for i in range(FLAGS.num_networks): name = 'project%s' % i project = self.manager.create_project(name, 'netuser', name) self.projects.append(project) @@ -117,6 +257,9 @@ class NetworkTestCase(test.TestCase): utils.to_global_ipv6( network_ref['cidr_v6'], instance_ref['mac_address'])) + self._deallocate_address(0, address) + db.instance_destroy(context.get_admin_context(), + instance_ref['id']) def test_public_network_association(self): """Makes sure that we can allocaate a public ip""" @@ -192,7 +335,7 @@ class NetworkTestCase(test.TestCase): first = self._create_address(0) lease_ip(first) instance_ids = [] - for i in range(1, 5): + for i in range(1, FLAGS.num_networks): instance_ref = self._create_instance(i, mac=utils.generate_mac()) instance_ids.append(instance_ref['id']) address = self._create_address(i, instance_ref['id']) @@ -342,13 +485,13 @@ def lease_ip(private_ip): private_ip) instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) - cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) + cmd = (binpath('nova-dhcpbridge'), 'add', + instance_ref['mac_address'], + private_ip, 'fake') env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) + (out, err) = utils.execute(*cmd, addl_env=env) LOG.debug("ISSUE_IP: %s, %s ", out, err) @@ -358,11 +501,11 @@ def release_ip(private_ip): private_ip) instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) - cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) + cmd = (binpath('nova-dhcpbridge'), 'del', + instance_ref['mac_address'], + private_ip, 'fake') env = {'DNSMASQ_INTERFACE': network_ref['bridge'], 'TESTING': '1', 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) + (out, err) = utils.execute(*cmd, addl_env=env) LOG.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 9548a8c13..45b544753 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -16,14 +16,16 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import compute from nova import context from nova import db from nova import flags +from nova import network from nova import quota from nova import test from nova import utils +from nova import volume from nova.auth import manager -from nova.api.ec2 import cloud from nova.compute import instance_types @@ -40,7 +42,6 @@ class QuotaTestCase(test.TestCase): quota_gigabytes=20, quota_floating_ips=1) - self.cloud = cloud.CloudController() self.manager = manager.AuthManager() self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('admin', 'admin', 'admin') @@ -56,7 +57,7 @@ class QuotaTestCase(test.TestCase): def _create_instance(self, cores=2): """Create a test instance""" inst = {} - inst['image_id'] = 'ami-test' + inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user.id inst['project_id'] = self.project.id @@ -73,20 +74,43 @@ class QuotaTestCase(test.TestCase): vol['size'] = size return db.volume_create(self.context, vol)['id'] + def _get_instance_type(self, name): + instance_types = { + 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), + 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), + 'm1.medium': + dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), + 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), + 'm1.xlarge': + dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} + return instance_types[name] + def test_quota_overrides(self): """Make sure overriding a projects quotas works""" num_instances = quota.allowed_instances(self.context, 100, - instance_types.INSTANCE_TYPES['m1.small']) + self._get_instance_type('m1.small')) self.assertEqual(num_instances, 2) db.quota_create(self.context, {'project_id': self.project.id, 'instances': 10}) num_instances = quota.allowed_instances(self.context, 100, - instance_types.INSTANCE_TYPES['m1.small']) + self._get_instance_type('m1.small')) self.assertEqual(num_instances, 4) db.quota_update(self.context, self.project.id, {'cores': 100}) num_instances = quota.allowed_instances(self.context, 100, - instance_types.INSTANCE_TYPES['m1.small']) + self._get_instance_type('m1.small')) self.assertEqual(num_instances, 10) + + # metadata_items + too_many_items = FLAGS.quota_metadata_items + 1000 + num_metadata_items = quota.allowed_metadata_items(self.context, + too_many_items) + self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items) + db.quota_update(self.context, self.project.id, {'metadata_items': 5}) + num_metadata_items = quota.allowed_metadata_items(self.context, + too_many_items) + self.assertEqual(num_metadata_items, 5) + + # Cleanup db.quota_destroy(self.context, self.project.id) def test_too_many_instances(self): @@ -94,12 +118,12 @@ class QuotaTestCase(test.TestCase): for i in range(FLAGS.quota_instances): instance_id = self._create_instance() instance_ids.append(instance_id) - self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.assertRaises(quota.QuotaError, compute.API().create, self.context, min_count=1, max_count=1, instance_type='m1.small', - image_id='fake') + image_id=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -107,12 +131,12 @@ class QuotaTestCase(test.TestCase): instance_ids = [] instance_id = self._create_instance(cores=4) instance_ids.append(instance_id) - self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.assertRaises(quota.QuotaError, compute.API().create, self.context, min_count=1, 
max_count=1, instance_type='m1.small', - image_id='fake') + image_id=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -121,9 +145,12 @@ class QuotaTestCase(test.TestCase): for i in range(FLAGS.quota_volumes): volume_id = self._create_volume() volume_ids.append(volume_id) - self.assertRaises(quota.QuotaError, self.cloud.create_volume, - self.context, - size=10) + self.assertRaises(quota.QuotaError, + volume.API().create, + self.context, + size=10, + name='', + description='') for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) @@ -132,9 +159,11 @@ class QuotaTestCase(test.TestCase): volume_id = self._create_volume(size=20) volume_ids.append(volume_id) self.assertRaises(quota.QuotaError, - self.cloud.create_volume, + volume.API().create, self.context, - size=10) + size=10, + name='', + description='') for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) @@ -148,6 +177,19 @@ class QuotaTestCase(test.TestCase): # make an rpc.call, the test just finishes with OK. It # appears to be something in the magic inline callbacks # that is breaking. - self.assertRaises(quota.QuotaError, self.cloud.allocate_address, + self.assertRaises(quota.QuotaError, + network.API().allocate_floating_ip, self.context) db.floating_ip_destroy(context.get_admin_context(), address) + + def test_too_many_metadata_items(self): + metadata = {} + for i in range(FLAGS.quota_metadata_items + 1): + metadata['key%s' % i] = 'value%s' % i + self.assertRaises(quota.QuotaError, compute.API().create, + self.context, + min_count=1, + max_count=1, + instance_type='m1.small', + image_id='fake', + metadata=metadata) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 9d458244b..bb279ac4b 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -150,11 +150,12 @@ class SimpleDriverTestCase(test.TestCase): def tearDown(self): self.manager.delete_user(self.user) self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() def _create_instance(self, **kwargs): """Create a test instance""" inst = {} - inst['image_id'] = 'ami-test' + inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user.id inst['project_id'] = self.project.id @@ -168,26 +169,14 @@ class SimpleDriverTestCase(test.TestCase): def _create_volume(self): """Create a test volume""" vol = {} - vol['image_id'] = 'ami-test' - vol['reservation_id'] = 'r-fakeres' vol['size'] = 1 vol['availability_zone'] = 'test' return db.volume_create(self.context, vol)['id'] def test_doesnt_report_disabled_hosts_as_up(self): """Ensures driver doesn't find hosts before they are enabled""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') db.service_update(self.context, s1['id'], {'disabled': True}) @@ -199,18 +188,8 @@ class SimpleDriverTestCase(test.TestCase): def test_reports_enabled_hosts_as_up(self): """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without 
create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') hosts = self.scheduler.driver.hosts_up(self.context, 'compute') self.assertEqual(2, len(hosts)) compute1.kill() @@ -218,16 +197,8 @@ class SimpleDriverTestCase(test.TestCase): def test_least_busy_host_gets_instance(self): """Ensures the host with less cores gets the next one""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance() @@ -241,16 +212,8 @@ class SimpleDriverTestCase(test.TestCase): def test_specific_host_gets_instance(self): """Ensures if you set availability_zone it launches on that zone""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance(availability_zone='nova:host1') @@ -263,11 +226,7 @@ class SimpleDriverTestCase(test.TestCase): compute2.kill() def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() + compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') now = datetime.datetime.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) @@ -282,11 +241,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() + compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') db.service_update(self.context, s1['id'], {'disabled': True}) instance_id2 = self._create_instance(availability_zone='nova:host1') @@ -298,16 +253,8 @@ class SimpleDriverTestCase(test.TestCase): def test_too_many_cores(self): """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') instance_ids1 = [] instance_ids2 = [] for index in xrange(FLAGS.max_cores): @@ -322,6 +269,7 @@ class SimpleDriverTestCase(test.TestCase): self.scheduler.driver.schedule_run_instance, self.context, instance_id) + db.instance_destroy(self.context, instance_id) for instance_id 
in instance_ids1: compute1.terminate_instance(self.context, instance_id) for instance_id in instance_ids2: @@ -331,16 +279,8 @@ class SimpleDriverTestCase(test.TestCase): def test_least_busy_host_gets_volume(self): """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') volume_id1 = self._create_volume() volume1.create_volume(self.context, volume_id1) volume_id2 = self._create_volume() @@ -354,16 +294,8 @@ class SimpleDriverTestCase(test.TestCase): def test_too_many_gigabytes(self): """Ensures we don't go over max gigabytes""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') volume_ids1 = [] volume_ids2 = [] for index in xrange(FLAGS.max_gigabytes): diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index a67c8d1e8..45d9afa6c 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -50,13 +50,6 @@ class ExtendedService(service.Service): class ServiceManagerTestCase(test.TestCase): """Test cases for Services""" - def test_attribute_error_for_no_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'nova.tests.test_service.FakeManager') - self.assertRaises(AttributeError, getattr, serv, 'test_method') - def test_message_gets_to_manager(self): serv = service.Service('test', 'test', diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py new file mode 100644 index 000000000..e237674e6 --- /dev/null +++ b/nova/tests/test_test.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for the testing base code.""" + +from nova import rpc +from nova import test + + +class IsolationTestCase(test.TestCase): + """Ensure that things are cleaned up after failed tests. + + These tests don't really do much here, but if isolation fails a bunch + of other tests should fail. 
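The IsolationTestCase below leans on the start_service() helper this branch adds to the base test class: services started through it are remembered and killed on teardown, so a failing test cannot leak a running service into the next one. A self-contained sketch of that bookkeeping, under the assumption that the real helper behaves this way (FakeService and ServiceTrackingMixin are illustrative names, not nova.test code):

    class FakeService(object):
        """Stand-in for nova.service.Service; tracks liveness only."""

        def __init__(self, topic, host):
            self.topic = topic
            self.host = host
            self.alive = False

        def start(self):
            self.alive = True

        def kill(self):
            self.alive = False


    class ServiceTrackingMixin(object):
        """Remember every started service; kill them all in tearDown."""

        def setUp(self):
            self._services = []

        def start_service(self, topic, host=None):
            svc = FakeService(topic, host)
            svc.start()
            self._services.append(svc)
            return svc

        def tearDown(self):
            while self._services:
                self._services.pop().kill()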
+ + """ + def test_service_isolation(self): + self.start_service('compute') + + def test_rpc_consumer_isolation(self): + connection = rpc.Connection.instance(new=True) + consumer = rpc.TopicConsumer(connection, topic='compute') + consumer.register_callback( + lambda x, y: self.fail('I should never be called')) + consumer.attach_to_eventlet() diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py new file mode 100644 index 000000000..34a407f1a --- /dev/null +++ b/nova/tests/test_utils.py @@ -0,0 +1,174 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import test +from nova import utils +from nova import exception + + +class GetFromPathTestCase(test.TestCase): + def test_tolerates_nones(self): + f = utils.get_from_path + + input = [] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [None] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': None}] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': None}}] + self.assertEquals([{'b': None}], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': None}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] + self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + def test_does_select(self): + f = utils.get_from_path + + input = [{'a': 'a_1'}] + self.assertEquals(['a_1'], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': 'b_1'}}] + self.assertEquals([{'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': None}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + 
self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': {'c': 'c_2'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], + f(input, "a")) + self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) + self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c")) + + self.assertEquals([], f(input, "a/b/c/d")) + self.assertEquals([], f(input, "c/a/b/d")) + self.assertEquals([], f(input, "i/r/t")) + + def test_flattens_lists(self): + f = utils.get_from_path + + input = [{'a': [1, 2, 3]}] + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}] + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [1, 2, {'b': 'b_1'}]}] + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + + def test_bad_xpath(self): + f = utils.get_from_path + + self.assertRaises(exception.Error, f, [], None) + self.assertRaises(exception.Error, f, [], "") + self.assertRaises(exception.Error, f, [], "/") + self.assertRaises(exception.Error, f, [], "/a") + self.assertRaises(exception.Error, f, [], "/a/") + self.assertRaises(exception.Error, f, [], "//") + self.assertRaises(exception.Error, f, [], "//a") + self.assertRaises(exception.Error, f, [], "a//a") + self.assertRaises(exception.Error, f, [], "a//a/") + self.assertRaises(exception.Error, f, [], "a/a/") + + def test_real_failure1(self): + # Real world failure case... + # We weren't coping when the input was a Dictionary instead of a List + # This led to test_accepts_dictionaries + f = utils.get_from_path + + inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], + 'address': '192.168.0.3'}, + 'hostname': ''} + + private_ips = f(inst, 'fixed_ip/address') + public_ips = f(inst, 'fixed_ip/floating_ips/address') + self.assertEquals(['192.168.0.3'], private_ips) + self.assertEquals(['1.2.3.4'], public_ips) + + def test_accepts_dictionaries(self): + f = utils.get_from_path + + input = {'a': [1, 2, 3]} + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': {'b': [1, 2, 3]}} + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [1, 2, {'b': 'b_1'}]} + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 6e5a0114b..648de3b77 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -14,6 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. 
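The test_real_failure1 case above doubles as the clearest usage example for the new utils.get_from_path; pulled out of test form:

    from nova import utils

    inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
                         'address': '192.168.0.3'},
            'hostname': ''}

    utils.get_from_path(inst, 'fixed_ip/address')               # ['192.168.0.3']
    utils.get_from_path(inst, 'fixed_ip/floating_ips/address')  # ['1.2.3.4']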
+import re +import os + +import eventlet from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom @@ -30,6 +34,70 @@ FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +def _concurrency(wait, done, target): + wait.wait() + done.send() + + +class CacheConcurrencyTestCase(test.TestCase): + def setUp(self): + super(CacheConcurrencyTestCase, self).setUp() + + def fake_exists(fname): + basedir = os.path.join(FLAGS.instances_path, '_base') + if fname == basedir: + return True + return False + + def fake_execute(*args, **kwargs): + pass + + self.stubs.Set(os.path, 'exists', fake_exists) + self.stubs.Set(utils, 'execute', fake_execute) + + def test_same_fname_concurrency(self): + """Ensures that the same fname cache runs sequentially""" + conn = libvirt_conn.LibvirtConnection + wait1 = eventlet.event.Event() + done1 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname', False, wait1, done1) + wait2 = eventlet.event.Event() + done2 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname', False, wait2, done2) + wait2.send() + eventlet.sleep(0) + try: + self.assertFalse(done2.ready()) + self.assertTrue('fname' in conn._image_sems) + finally: + wait1.send() + done1.wait() + eventlet.sleep(0) + self.assertTrue(done2.ready()) + self.assertFalse('fname' in conn._image_sems) + + def test_different_fname_concurrency(self): + """Ensures that two different fname caches are concurrent""" + conn = libvirt_conn.LibvirtConnection + wait1 = eventlet.event.Event() + done1 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname2', False, wait1, done1) + wait2 = eventlet.event.Event() + done2 = eventlet.event.Event() + eventlet.spawn(conn._cache_image, _concurrency, + 'target', 'fname1', False, wait2, done2) + wait2.send() + eventlet.sleep(0) + try: + self.assertTrue(done2.ready()) + finally: + wait1.send() + eventlet.sleep(0) + + class LibvirtConnTestCase(test.TestCase): def setUp(self): super(LibvirtConnTestCase, self).setUp() @@ -204,11 +272,12 @@ class LibvirtConnTestCase(test.TestCase): conn = libvirt_conn.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, testuri) + db.instance_destroy(user_context, instance_ref['id']) def tearDown(self): - super(LibvirtConnTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + super(LibvirtConnTestCase, self).tearDown() class IptablesFirewallTestCase(test.TestCase): @@ -233,16 +302,22 @@ class IptablesFirewallTestCase(test.TestCase): self.manager.delete_user(self.user) super(IptablesFirewallTestCase, self).tearDown() - in_rules = [ + in_nat_rules = [ + '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', + '*nat', + ':PREROUTING ACCEPT [1170:189210]', + ':INPUT ACCEPT [844:71028]', + ':OUTPUT ACCEPT [5149:405186]', + ':POSTROUTING ACCEPT [5063:386098]' + ] + + in_filter_rules = [ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', '*filter', ':INPUT ACCEPT [969615:281627771]', ':FORWARD ACCEPT [0:0]', ':OUTPUT ACCEPT [915599:63811649]', ':nova-block-ipv4 - [0:0]', - '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' ',ESTABLISHED
-j ACCEPT ', @@ -254,7 +329,7 @@ class IptablesFirewallTestCase(test.TestCase): '# Completed on Mon Dec 6 11:54:13 2010', ] - in6_rules = [ + in6_filter_rules = [ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011', '*filter', ':INPUT ACCEPT [349155:75810423]', @@ -314,23 +389,34 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) # self.fw.add_instance(instance_ref) - def fake_iptables_execute(cmd, process_input=None): - if cmd == 'sudo ip6tables-save -t filter': - return '\n'.join(self.in6_rules), None - if cmd == 'sudo iptables-save -t filter': - return '\n'.join(self.in_rules), None - if cmd == 'sudo iptables-restore': - self.out_rules = process_input.split('\n') + def fake_iptables_execute(*cmd, **kwargs): + process_input = kwargs.get('process_input', None) + if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'): + return '\n'.join(self.in6_filter_rules), None + if cmd == ('sudo', 'iptables-save', '-t', 'filter'): + return '\n'.join(self.in_filter_rules), None + if cmd == ('sudo', 'iptables-save', '-t', 'nat'): + return '\n'.join(self.in_nat_rules), None + if cmd == ('sudo', 'iptables-restore'): + lines = process_input.split('\n') + if '*filter' in lines: + self.out_rules = lines return '', '' - if cmd == 'sudo ip6tables-restore': - self.out6_rules = process_input.split('\n') + if cmd == ('sudo', 'ip6tables-restore'): + lines = process_input.split('\n') + if '*filter' in lines: + self.out6_rules = lines return '', '' - self.fw.execute = fake_iptables_execute + print cmd, kwargs + + from nova.network import linux_net + linux_net.iptables_manager.execute = fake_iptables_execute self.fw.prepare_instance_filter(instance_ref) self.fw.apply_instance_filter(instance_ref) - in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) + in_rules = filter(lambda l: not l.startswith('#'), + self.in_filter_rules) for rule in in_rules: if not 'nova' in rule: self.assertTrue(rule in self.out_rules, @@ -353,18 +439,20 @@ class IptablesFirewallTestCase(test.TestCase): self.assertTrue(security_group_chain, "The security group chain wasn't added") - self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \ - security_group_chain in self.out_rules, + regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT') + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP acceptance rule wasn't added") - self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type ' - '8 -j ACCEPT' % security_group_chain in self.out_rules, + regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp ' + '--icmp-type 8 -j ACCEPT') + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP Echo Request acceptance rule wasn't added") - self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport ' - '--dports 80:81 -j ACCEPT' % security_group_chain \ - in self.out_rules, + regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport ' + '--dports 80:81 -j ACCEPT') + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "TCP port 80/81 acceptance rule wasn't added") + db.instance_destroy(admin_ctxt, instance_ref['id']) class NWFilterTestCase(test.TestCase): @@ -388,6 +476,7 @@ class NWFilterTestCase(test.TestCase): def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) + super(NWFilterTestCase, self).tearDown() def test_cidr_rule_nwfilter_xml(self): cloud_controller = cloud.CloudController() @@ -514,3 +603,4 @@ class NWFilterTestCase(test.TestCase): 
self.fw.apply_instance_filter(instance) _ensure_all_called() self.teardown_security_group() + db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index b40ca004b..f698c85b5 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -99,7 +99,7 @@ class VolumeTestCase(test.TestCase): def test_run_attach_detach_volume(self): """Make sure volume can be attached and detached from instance.""" inst = {} - inst['image_id'] = 'ami-test' + inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' inst['user_id'] = 'fake' diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 9f5b266f3..c26dc8639 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -31,7 +31,9 @@ from nova.compute import power_state from nova.virt import xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils +from nova.virt.xenapi import vm_utils from nova.virt.xenapi.vmops import SimpleDH +from nova.virt.xenapi.vmops import VMOps from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs from nova.tests.glance import stubs as glance_stubs @@ -141,6 +143,10 @@ class XenAPIVolumeTestCase(test.TestCase): self.stubs.UnsetAll() +def reset_network(*args): + pass + + class XenAPIVMTestCase(test.TestCase): """ Unit tests for VM operations @@ -162,6 +168,8 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stubout_stream_disk(self.stubs) + stubs.stubout_is_vdi_pv(self.stubs) + self.stubs.Set(VMOps, 'reset_network', reset_network) glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) @@ -225,7 +233,7 @@ class XenAPIVMTestCase(test.TestCase): vm = vms[0] # Check that m1.large above turned into the right thing. - instance_type = instance_types.INSTANCE_TYPES['m1.large'] + instance_type = db.instance_type_get_by_name(conn, 'm1.large') mem_kib = long(instance_type['memory_mb']) << 10 mem_bytes = str(mem_kib << 10) vcpus = instance_type['vcpus'] @@ -243,7 +251,8 @@ class XenAPIVMTestCase(test.TestCase): # Check that the VM is running according to XenAPI. 
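The shift arithmetic in the VM record check above converts the instance type's megabyte count into the byte string XenAPI stores; worked through for m1.large's 8192 MB:

    memory_mb = 8192                  # m1.large
    mem_kib = memory_mb << 10         # 8388608 KiB
    mem_bytes = str(mem_kib << 10)    # '8589934592' bytes, as XenAPI reports it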
self.assertEquals(vm['power_state'], 'Running') - def _test_spawn(self, image_id, kernel_id, ramdisk_id): + def _test_spawn(self, image_id, kernel_id, ramdisk_id, + instance_type="m1.large"): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) values = {'name': 1, 'id': 1, @@ -252,7 +261,7 @@ class XenAPIVMTestCase(test.TestCase): 'image_id': image_id, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, - 'instance_type': 'm1.large', + 'instance_type': instance_type, 'mac_address': 'aa:bb:cc:dd:ee:ff', } conn = xenapi_conn.get_connection(False) @@ -260,6 +269,12 @@ class XenAPIVMTestCase(test.TestCase): conn.spawn(instance) self.check_vm_record(conn) + def test_spawn_not_enough_memory(self): + FLAGS.xenapi_image_service = 'glance' + self.assertRaises(Exception, + self._test_spawn, + 1, 2, 3, "m1.xlarge") + def test_spawn_raw_objectstore(self): FLAGS.xenapi_image_service = 'objectstore' self._test_spawn(1, None, None) @@ -270,11 +285,17 @@ class XenAPIVMTestCase(test.TestCase): def test_spawn_raw_glance(self): FLAGS.xenapi_image_service = 'glance' - self._test_spawn(1, None, None) + self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None) + + def test_spawn_vhd_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None) def test_spawn_glance(self): FLAGS.xenapi_image_service = 'glance' - self._test_spawn(1, 2, 3) + self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, + glance_stubs.FakeGlance.IMAGE_KERNEL, + glance_stubs.FakeGlance.IMAGE_RAMDISK) def tearDown(self): super(XenAPIVMTestCase, self).tearDown() @@ -323,3 +344,113 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() + + +class XenAPIMigrateInstance(test.TestCase): + """ + Unit test for verifying migration-related actions + """ + + def setUp(self): + super(XenAPIMigrateInstance, self).setUp() + self.stubs = stubout.StubOutForTesting() + FLAGS.target_host = '127.0.0.1' + FLAGS.xenapi_connection_url = 'test_url' + FLAGS.xenapi_connection_password = 'test_pass' + db_fakes.stub_out_db_instance_api(self.stubs) + stubs.stub_out_get_target(self.stubs) + xenapi_fake.reset() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.values = {'name': 1, 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': None, + 'ramdisk_id': None, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + stubs.stub_out_migration_methods(self.stubs) + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) + + def tearDown(self): + super(XenAPIMigrateInstance, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + self.stubs.UnsetAll() + + def test_migrate_disk_and_power_off(self): + instance = db.instance_create(self.values) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + conn = xenapi_conn.get_connection(False) + conn.migrate_disk_and_power_off(instance, '127.0.0.1') + + def test_finish_resize(self): + instance = db.instance_create(self.values) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + conn = xenapi_conn.get_connection(False) + conn.finish_resize(instance, dict(base_copy='hurr', cow='durr')) + + +class XenAPIDetermineDiskImageTestCase(test.TestCase): + """ + Unit tests for code that detects 
the ImageType + """ + def setUp(self): + super(XenAPIDetermineDiskImageTestCase, self).setUp() + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) + + class FakeInstance(object): + pass + + self.fake_instance = FakeInstance() + self.fake_instance.id = 42 + + def assert_disk_type(self, disk_type): + dt = vm_utils.VMHelper.determine_disk_image_type( + self.fake_instance) + self.assertEqual(disk_type, dt) + + def test_instance_disk(self): + """ + If a kernel is specified then the image type is DISK (aka machine) + """ + FLAGS.xenapi_image_service = 'objectstore' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE + self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL + self.assert_disk_type(vm_utils.ImageType.DISK) + + def test_instance_disk_raw(self): + """ + If the kernel isn't specified, and we're not using Glance, then + DISK_RAW is assumed. + """ + FLAGS.xenapi_image_service = 'objectstore' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_RAW) + + def test_glance_disk_raw(self): + """ + If we're using Glance, then defer to the image_type field, which in + this case will be 'raw'. + """ + FLAGS.xenapi_image_service = 'glance' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_RAW) + + def test_glance_disk_vhd(self): + """ + If we're using Glance, then defer to the image_type field, which in + this case will be 'vhd'. + """ + FLAGS.xenapi_image_service = 'glance' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py new file mode 100644 index 000000000..5a52a0506 --- /dev/null +++ b/nova/tests/test_zones.py @@ -0,0 +1,172 @@ +# Copyright 2010 United States Government as represented by the +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
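The four XenAPIDetermineDiskImageTestCase tests above fix a small decision table for vm_utils.VMHelper.determine_disk_image_type. A sketch of the equivalent logic; the flattened signature and string results here are simplifications (the real helper takes the instance and returns ImageType constants):

    def disk_image_type(kernel_id, image_service, glance_disk_format=None):
        if kernel_id:
            return 'DISK'       # separate kernel/ramdisk => machine image
        if image_service != 'glance':
            return 'DISK_RAW'   # objectstore without a kernel => raw
        # Glance records the image's own format; defer to it.
        return {'raw': 'DISK_RAW', 'vhd': 'DISK_VHD'}[glance_disk_format]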
+""" +Tests For ZoneManager +""" + +import datetime +import mox +import novaclient + +from nova import context +from nova import db +from nova import flags +from nova import service +from nova import test +from nova import rpc +from nova import utils +from nova.auth import manager as auth_manager +from nova.scheduler import zone_manager + +FLAGS = flags.FLAGS + + +class FakeZone: + """Represents a fake zone from the db""" + def __init__(self, *args, **kwargs): + for k, v in kwargs.iteritems(): + setattr(self, k, v) + + +def exploding_novaclient(zone): + """Used when we want to simulate a novaclient call failing.""" + raise Exception("kaboom") + + +class ZoneManagerTestCase(test.TestCase): + """Test case for zone manager""" + def test_ping(self): + zm = zone_manager.ZoneManager() + self.mox.StubOutWithMock(zm, '_refresh_from_db') + self.mox.StubOutWithMock(zm, '_poll_zones') + zm._refresh_from_db(mox.IgnoreArg()) + zm._poll_zones(mox.IgnoreArg()) + + self.mox.ReplayAll() + zm.ping(None) + self.mox.VerifyAll() + + def test_refresh_from_db_new(self): + zm = zone_manager.ZoneManager() + + self.mox.StubOutWithMock(db, 'zone_get_all') + db.zone_get_all(mox.IgnoreArg()).AndReturn([ + FakeZone(id=1, api_url='http://foo.com', username='user1', + password='pass1'), + ]) + + self.assertEquals(len(zm.zone_states), 0) + + self.mox.ReplayAll() + zm._refresh_from_db(None) + self.mox.VerifyAll() + + self.assertEquals(len(zm.zone_states), 1) + self.assertEquals(zm.zone_states[1].username, 'user1') + + def test_refresh_from_db_replace_existing(self): + zm = zone_manager.ZoneManager() + zone_state = zone_manager.ZoneState() + zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com', + username='user1', password='pass1')) + zm.zone_states[1] = zone_state + + self.mox.StubOutWithMock(db, 'zone_get_all') + db.zone_get_all(mox.IgnoreArg()).AndReturn([ + FakeZone(id=1, api_url='http://foo.com', username='user2', + password='pass2'), + ]) + + self.assertEquals(len(zm.zone_states), 1) + + self.mox.ReplayAll() + zm._refresh_from_db(None) + self.mox.VerifyAll() + + self.assertEquals(len(zm.zone_states), 1) + self.assertEquals(zm.zone_states[1].username, 'user2') + + def test_refresh_from_db_missing(self): + zm = zone_manager.ZoneManager() + zone_state = zone_manager.ZoneState() + zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com', + username='user1', password='pass1')) + zm.zone_states[1] = zone_state + + self.mox.StubOutWithMock(db, 'zone_get_all') + db.zone_get_all(mox.IgnoreArg()).AndReturn([]) + + self.assertEquals(len(zm.zone_states), 1) + + self.mox.ReplayAll() + zm._refresh_from_db(None) + self.mox.VerifyAll() + + self.assertEquals(len(zm.zone_states), 0) + + def test_refresh_from_db_add_and_delete(self): + zm = zone_manager.ZoneManager() + zone_state = zone_manager.ZoneState() + zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com', + username='user1', password='pass1')) + zm.zone_states[1] = zone_state + + self.mox.StubOutWithMock(db, 'zone_get_all') + + db.zone_get_all(mox.IgnoreArg()).AndReturn([ + FakeZone(id=2, api_url='http://foo.com', username='user2', + password='pass2'), + ]) + self.assertEquals(len(zm.zone_states), 1) + + self.mox.ReplayAll() + zm._refresh_from_db(None) + self.mox.VerifyAll() + + self.assertEquals(len(zm.zone_states), 1) + self.assertEquals(zm.zone_states[2].username, 'user2') + + def test_poll_zone(self): + self.mox.StubOutWithMock(zone_manager, '_call_novaclient') + zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn( + 
dict(name='zohan', capabilities='hairdresser')) + + zone_state = zone_manager.ZoneState() + zone_state.update_credentials(FakeZone(id=2, + api_url='http://foo.com', username='user2', + password='pass2')) + zone_state.attempt = 1 + + self.mox.ReplayAll() + zone_manager._poll_zone(zone_state) + self.mox.VerifyAll() + self.assertEquals(zone_state.attempt, 0) + self.assertEquals(zone_state.name, 'zohan') + + def test_poll_zone_fails(self): + self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient) + + zone_state = zone_manager.ZoneState() + zone_state.update_credentials(FakeZone(id=2, + api_url='http://foo.com', username='user2', + password='pass2')) + zone_state.attempt = FLAGS.zone_failures_to_offline - 1 + + self.mox.ReplayAll() + zone_manager._poll_zone(zone_state) + self.mox.VerifyAll() + self.assertEquals(zone_state.attempt, 3) + self.assertFalse(zone_state.is_active) + self.assertEquals(zone_state.name, None) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 624995ada..70d46a1fb 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -20,6 +20,7 @@ from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils +from nova.virt.xenapi import vmops def stubout_instance_snapshot(stubs): @@ -27,7 +28,7 @@ def stubout_instance_snapshot(stubs): def fake_fetch_image(cls, session, instance_id, image, user, project, type): # Stubout wait_for_task - def fake_wait_for_task(self, id, task): + def fake_wait_for_task(self, task, id): class FakeEvent: def send(self, value): @@ -130,6 +131,12 @@ def stubout_stream_disk(stubs): stubs.Set(vm_utils, '_stream_disk', f) +def stubout_is_vdi_pv(stubs): + def f(_1): + return False + stubs.Set(vm_utils, '_is_vdi_pv', f) + + class FakeSessionForVMTests(fake.SessionBase): """ Stubs out a XenAPISession for VM tests """ def __init__(self, uri): @@ -171,6 +178,12 @@ class FakeSessionForVMTests(fake.SessionBase): def VM_destroy(self, session_ref, vm_ref): fake.destroy_vm(vm_ref) + def SR_scan(self, session_ref, sr_ref): + pass + + def VDI_set_name_label(self, session_ref, vdi_ref, name_label): + pass + class FakeSessionForVolumeTests(fake.SessionBase): """ Stubs out a XenAPISession for Volume tests """ @@ -205,3 +218,60 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): def SR_forget(self, _1, ref): pass + + +class FakeSessionForMigrationTests(fake.SessionBase): + """Stubs out a XenAPISession for Migration tests""" + def __init__(self, uri): + super(FakeSessionForMigrationTests, self).__init__(uri) + + def VDI_get_by_uuid(*args): + return 'hurr' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + vm['is_a_template'] = False + vm['is_control_domain'] = False + + +def stub_out_migration_methods(stubs): + def fake_get_snapshot(self, instance): + return 'foo', 'bar' + + @classmethod + def fake_get_vdi(cls, session, vm_ref): + vdi_ref = fake.create_vdi(name_label='derp', read_only=False, + sr_ref='herp', sharable=False) + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + return vdi_ref, {'uuid': vdi_rec['uuid'], } + + def fake_shutdown(self, inst, vm, method='clean'): + pass + + @classmethod + def fake_sr(cls, session, *args): + pass + + @classmethod + def fake_get_sr_path(cls, *args): + return "fake" + + def fake_destroy(*args, 
**kwargs): + pass + + def fake_reset_network(*args, **kwargs): + pass + + stubs.Set(vmops.VMOps, '_destroy', fake_destroy) + stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr) + stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr) + stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot) + stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi) + stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None) + stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path) + stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network) + stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown) diff --git a/nova/twistd.py b/nova/twistd.py index 6390a8144..c07ed991f 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -43,8 +43,6 @@ else: FLAGS = flags.FLAGS -flags.DEFINE_string('logdir', None, 'directory to keep log files in ' - '(will be prepended to $logfile)') class TwistdServerOptions(ServerOptions): @@ -150,6 +148,7 @@ def WrapTwistedOptions(wrapped): options.insert(0, '') args = FLAGS(options) + logging.setup() argv = args[1:] # ignore subcommands @@ -260,7 +259,6 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - logging.basicConfig() logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/utils.py b/nova/utils.py index 5f5225289..87e726394 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -20,30 +21,37 @@ System-level utilities and helper functions. 
""" +import base64 import datetime +import functools import inspect import json +import lockfile +import netaddr import os import random -import subprocess +import re import socket +import string import struct import sys import time +import types from xml.sax import saxutils -import re -import netaddr from eventlet import event from eventlet import greenthread - +from eventlet.green import subprocess +None from nova import exception from nova.exception import ProcessExecutionError +from nova import flags from nova import log as logging LOG = logging.getLogger("nova.utils") TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +FLAGS = flags.FLAGS def import_class(import_str): @@ -53,7 +61,7 @@ def import_class(import_str): __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError), exc: - logging.debug(_('Inner Exception: %s'), exc) + LOG.debug(_('Inner Exception: %s'), exc) raise exception.NotFound(_('Class %s cannot be found') % class_str) @@ -121,35 +129,90 @@ def fetchfile(url, target): # c.perform() # c.close() # fp.close() - execute("curl --fail %s -o %s" % (url, target)) + execute("curl", "--fail", url, "-o", target) + + +def execute(*cmd, **kwargs): + process_input = kwargs.get('process_input', None) + addl_env = kwargs.get('addl_env', None) + check_exit_code = kwargs.get('check_exit_code', 0) + stdin = kwargs.get('stdin', subprocess.PIPE) + stdout = kwargs.get('stdout', subprocess.PIPE) + stderr = kwargs.get('stderr', subprocess.PIPE) + attempts = kwargs.get('attempts', 1) + cmd = map(str, cmd) + + while attempts > 0: + attempts -= 1 + try: + LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd)) + env = os.environ.copy() + if addl_env: + env.update(addl_env) + obj = subprocess.Popen(cmd, stdin=stdin, + stdout=stdout, stderr=stderr, env=env) + result = None + if process_input != None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() + if obj.returncode: + LOG.debug(_("Result was %s") % obj.returncode) + if type(check_exit_code) == types.IntType \ + and obj.returncode != check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=obj.returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + return result + except ProcessExecutionError: + if not attempts: + raise + else: + LOG.debug(_("%r failed. 
Retrying."), cmd) + greenthread.sleep(random.randint(20, 200) / 100.0) -def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - LOG.debug(_("Running cmd (subprocess): %s"), cmd) - env = os.environ.copy() +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd)) if addl_env: - env.update(addl_env) - obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) - result = None - if process_input != None: - result = obj.communicate(process_input) - else: - result = obj.communicate() - obj.stdin.close() - if obj.returncode: - LOG.debug(_("Result was %s") % obj.returncode) - if check_exit_code and obj.returncode != 0: - (stdout, stderr) = result - raise ProcessExecutionError(exit_code=obj.returncode, - stdout=stdout, - stderr=stderr, - cmd=cmd) - # NOTE(termie): this appears to be necessary to let the subprocess call - # clean something up in between calls, without it two - # execute calls in a row hangs the second one - greenthread.sleep(0) - return result + raise exception.Error("Environment not supported over SSH") + + if process_input: + # This is (probably) fixable if we need it... + raise exception.Error("process_input not supported over SSH") + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + #stdin.write('process_input would go here') + #stdin.flush() + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_("Result was %s") % exit_status) + if check_exit_code and exit_status != 0: + raise exception.ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + + return (stdout, stderr) def abspath(s): @@ -180,9 +243,9 @@ def debug(arg): return arg -def runthis(prompt, cmd, check_exit_code=True): - LOG.debug(_("Running %s"), (cmd)) - rv, err = execute(cmd, check_exit_code=check_exit_code) +def runthis(prompt, *cmd, **kwargs): + LOG.debug(_("Running %s"), (" ".join(cmd))) + rv, err = execute(*cmd, **kwargs) def generate_uid(topic, size=8): @@ -199,13 +262,22 @@ def generate_mac(): return ':'.join(map(lambda x: "%02x" % x, mac)) +def generate_password(length=20): + """Generate a random sequence of letters and digits + to be used as a password. Note that this is not intended + to represent the ultimate in security. 
+ """ + chrs = string.letters + string.digits + return "".join([random.choice(chrs) for i in xrange(length)]) + + def last_octet(address): return int(address.split(".")[-1]) def get_my_linklocal(interface): try: - if_str = execute("ip -f inet6 -o addr show %s" % interface) + if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface) condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link" links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] @@ -440,3 +512,76 @@ def dumps(value): def loads(s): return json.loads(s) + + +def synchronized(name): + def wrap(f): + @functools.wraps(f) + def inner(*args, **kwargs): + lock = lockfile.FileLock(os.path.join(FLAGS.lock_path, + 'nova-%s.lock' % name)) + with lock: + return f(*args, **kwargs) + return inner + return wrap + + +def ensure_b64_encoding(val): + """Safety method to ensure that values expected to be base64-encoded + actually are. If they are, the value is returned unchanged. Otherwise, + the encoded value is returned. + """ + try: + dummy = base64.decode(val) + return val + except TypeError: + return base64.b64encode(val) + + +def get_from_path(items, path): + """ Returns a list of items matching the specified path. Takes an + XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, + looks up items[prop1][prop2][prop3]. Like XPath, if any of the + intermediate results are lists it will treat each list item individually. + A 'None' in items or any child expressions will be ignored, this function + will not throw because of None (anywhere) in items. The returned list + will contain no None values.""" + + if path is None: + raise exception.Error("Invalid mini_xpath") + + (first_token, sep, remainder) = path.partition("/") + + if first_token == "": + raise exception.Error("Invalid mini_xpath") + + results = [] + + if items is None: + return results + + if not isinstance(items, types.ListType): + # Wrap single objects in a list + items = [items] + + for item in items: + if item is None: + continue + get_method = getattr(item, "get", None) + if get_method is None: + continue + child = get_method(first_token) + if child is None: + continue + if isinstance(child, types.ListType): + # Flatten intermediate lists + for x in child: + results.append(x) + else: + results.append(child) + + if not sep: + # No more tokens + return results + else: + return get_from_path(results, remainder) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index c5565abfa..5d499c42c 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -38,6 +38,10 @@ flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, 'minimum size in bytes of root partition') flags.DEFINE_integer('block_size', 1024 * 1024 * 256, 'block_size to use for dd') +flags.DEFINE_integer('timeout_nbd', 10, + 'time to wait for a NBD device coming up') +flags.DEFINE_integer('max_nbd_devices', 16, + 'maximum number of possible nbd devices') def extend(image, size): @@ -45,10 +49,10 @@ def extend(image, size): file_size = os.path.getsize(image) if file_size >= size: return - utils.execute('truncate -s %s %s' % (size, image)) + utils.execute('truncate', '-s', size, image) # NOTE(vish): attempts to resize filesystem - utils.execute('e2fsck -fp %s' % image, check_exit_code=False) - utils.execute('resize2fs %s' % image, check_exit_code=False) + utils.execute('e2fsck', '-fp', image, check_exit_code=False) + utils.execute('resize2fs', image, check_exit_code=False) def inject_data(image, key=None, net=None, 
partition=None, nbd=False): @@ -64,7 +68,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): try: if not partition is None: # create partition - out, err = utils.execute('sudo kpartx -a %s' % device) + out, err = utils.execute('sudo', 'kpartx', '-a', device) if err: raise exception.Error(_('Failed to load partition: %s') % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], @@ -80,13 +84,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): mapped_device) # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) + out, err = utils.execute('sudo', 'tune2fs', + '-c', 0, '-i', 0, mapped_device) tmpdir = tempfile.mkdtemp() try: # mount loopback to dir out, err = utils.execute( - 'sudo mount %s %s' % (mapped_device, tmpdir)) + 'sudo', 'mount', mapped_device, tmpdir) if err: raise exception.Error(_('Failed to mount filesystem: %s') % err) @@ -99,13 +104,13 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): _inject_net_into_fs(net, tmpdir) finally: # unmount device - utils.execute('sudo umount %s' % mapped_device) + utils.execute('sudo', 'umount', mapped_device) finally: # remove temporary directory - utils.execute('rmdir %s' % tmpdir) + utils.execute('rmdir', tmpdir) if not partition is None: # remove partitions - utils.execute('sudo kpartx -d %s' % device) + utils.execute('sudo', 'kpartx', '-d', device) finally: _unlink_device(device, nbd) @@ -114,16 +119,16 @@ def _link_device(image, nbd): """Link image to device using loopback or nbd""" if nbd: device = _allocate_device() - utils.execute('sudo qemu-nbd -c %s %s' % (device, image)) + utils.execute('sudo', 'qemu-nbd', '-c', device, image) # NOTE(vish): this forks into another process, so give it a chance # to set up before continuuing - for i in xrange(10): + for i in xrange(FLAGS.timeout_nbd): if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)): return device time.sleep(1) raise exception.Error(_('nbd device %s did not show up') % device) else: - out, err = utils.execute('sudo losetup --find --show %s' % image) + out, err = utils.execute('sudo', 'losetup', '--find', '--show', image) if err: raise exception.Error(_('Could not attach image to loopback: %s') % err) @@ -133,13 +138,13 @@ def _link_device(image, nbd): def _unlink_device(device, nbd): """Unlink image from device using loopback or nbd""" if nbd: - utils.execute('sudo qemu-nbd -d %s' % device) + utils.execute('sudo', 'qemu-nbd', '-d', device) _free_device(device) else: - utils.execute('sudo losetup --detach %s' % device) + utils.execute('sudo', 'losetup', '--detach', device) -_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)] +_DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)] def _allocate_device(): @@ -166,11 +171,12 @@ def _inject_key_into_fs(key, fs): fs is the path to the base of the filesystem into which to inject the key. 
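Every disk.py call in these hunks moves from a single shell-parsed string to an argument list, the same execute() conversion applied throughout the branch. The practical difference, in a standalone sketch with plain subprocess standing in for nova's wrapper:

    import subprocess

    image = 'disk with spaces.img'   # awkward but legitimate file name

    # Old style: /bin/sh re-parses the string, so the spaces split the name
    # into three arguments and any shell metacharacters would be honored:
    #   subprocess.call('truncate -s 10G %s' % image, shell=True)

    # New style: arguments reach execve() verbatim; no quoting, no shell.
    subprocess.call(['truncate', '-s', '10G', image])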
""" sshdir = os.path.join(fs, 'root', '.ssh') - utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter - utils.execute('sudo chown root %s' % sshdir) - utils.execute('sudo chmod 700 %s' % sshdir) + utils.execute('sudo', 'mkdir', '-p', sshdir) # existing dir doesn't matter + utils.execute('sudo', 'chown', 'root', sshdir) + utils.execute('sudo', 'chmod', '700', sshdir) keyfile = os.path.join(sshdir, 'authorized_keys') - utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + utils.execute('sudo', 'tee', '-a', keyfile, + process_input='\n' + key.strip() + '\n') def _inject_net_into_fs(net, fs): @@ -179,8 +185,8 @@ def _inject_net_into_fs(net, fs): net is the contents of /etc/network/interfaces. """ netdir = os.path.join(os.path.join(fs, 'etc'), 'network') - utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter - utils.execute('sudo chown root:root %s' % netdir) - utils.execute('sudo chmod 755 %s' % netdir) + utils.execute('sudo', 'mkdir', '-p', netdir) # existing dir doesn't matter + utils.execute('sudo', 'chown', 'root:root', netdir) + utils.execute('sudo', 'chmod', 755, netdir) netfile = os.path.join(netdir, 'interfaces') - utils.execute('sudo tee %s' % netfile, net) + utils.execute('sudo', 'tee', netfile, net) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 161445b86..c744acf91 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -139,6 +139,24 @@ class FakeConnection(object): """ pass + def get_host_ip_addr(self): + """ + Retrieves the IP address of the dom0 + """ + pass + + def resize(self, instance, flavor): + """ + Resizes/Migrates the specified instance. + + The flavor parameter determines whether or not the instance RAM and + disk space are modified, and if so, to what size. + + The work will be done asynchronously. This function returns a task + that allows the caller to detect when it is complete. + """ + pass + def set_admin_password(self, instance, new_pass): """ Set the root password on the specified instance. @@ -152,6 +170,21 @@ class FakeConnection(object): """ pass + def inject_file(self, instance, b64_path, b64_contents): + """ + Writes a file on the specified instance. + + The first parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. The second + parameter is the base64-encoded path to which the file is to be + written on the instance; the third is the contents of the file, also + base64-encoded. + + The work will be done asynchronously. This function returns a + task that allows the caller to detect when it is complete. + """ + pass + def rescue(self, instance): """ Rescue the specified instance. @@ -164,6 +197,19 @@ class FakeConnection(object): """ pass + def migrate_disk_and_power_off(self, instance, dest): + """ + Transfers the disk of a running instance in multiple phases, turning + off the instance before the end. + """ + pass + + def attach_disk(self, instance, disk_info): + """ + Attaches the disk to an instance given the metadata disk_info + """ + pass + def pause(self, instance, callback): """ Pause the specified instance. 
@@ -304,7 +350,9 @@ class FakeConnection(object): return 'FAKE CONSOLE OUTPUT' def get_ajax_console(self, instance): - return 'http://fakeajaxconsole.com/?token=FAKETOKEN' + return {'token': 'FAKETOKEN', + 'host': 'fakeajaxconsole.com', + 'port': 6969} def get_console_pool_info(self, console_type): return {'address': '127.0.0.1', diff --git a/nova/virt/images.py b/nova/virt/images.py index 7a6fef330..2e3f2ee4d 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -28,29 +28,32 @@ import time import urllib2 import urlparse +from nova import context from nova import flags from nova import log as logging from nova import utils from nova.auth import manager from nova.auth import signer -from nova.objectstore import image FLAGS = flags.FLAGS -flags.DEFINE_bool('use_s3', True, - 'whether to get images from s3 or use local copy') - LOG = logging.getLogger('nova.virt.images') -def fetch(image, path, user, project): - if FLAGS.use_s3: - f = _fetch_s3_image - else: - f = _fetch_local_image - return f(image, path, user, project) +def fetch(image_id, path, _user, _project): + # TODO(vish): Improve context handling and add owner and auth data + # when it is added to glance. Right now there is no + # auth checking in glance, so we assume that access was + # checked before we got here. + image_service = utils.import_object(FLAGS.image_service) + with open(path, "wb") as image_file: + elevated = context.get_admin_context() + metadata = image_service.get(elevated, image_id, image_file) + return metadata +# NOTE(vish): The methods below should be unnecessary, but I'm leaving +# them in case the glance client does not work on windows. def _fetch_image_no_curl(url, path, headers): request = urllib2.Request(url) for (k, v) in headers.iteritems(): @@ -94,8 +97,7 @@ def _fetch_s3_image(image, path, user, project): cmd += ['-H', '\'%s: %s\'' % (k, v)] cmd += ['-o', path] - cmd_out = ' '.join(cmd) - return utils.execute(cmd_out) + return utils.execute(*cmd) def _fetch_local_image(image, path, user, project): @@ -103,13 +105,15 @@ def _fetch_local_image(image, path, user, project): if sys.platform.startswith('win'): return shutil.copy(source, path) else: - return utils.execute('cp %s %s' % (source, path)) + return utils.execute('cp', source, path) def _image_path(path): return os.path.join(FLAGS.images_path, path) +# TODO(vish): xenapi should use the glance client code directly instead +# of retrieving the image using this method. 
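The TODO above aside, fetch() now simply delegates to whatever class FLAGS.image_service names. A minimal sketch of the get() contract it relies on (the class name and image path here are hypothetical, not part of the patch):

    import os

    class ToyImageService(object):
        def get(self, context, image_id, data):
            # Stream the image bytes into the supplied file-like object
            # and return the image metadata, as fetch() expects.
            path = os.path.join('/var/lib/nova/images', str(image_id))
            with open(path, 'rb') as src:
                data.write(src.read())
            return {'id': image_id, 'size': os.path.getsize(path)}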
 def image_url(image):
     if FLAGS.image_service == "nova.image.glance.GlanceImageService":
         return "http://%s:%s/images/%s" % (FLAGS.glance_host,
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 4e0fd106f..61ef256f9 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -44,9 +44,8 @@ import uuid
 
 from xml.dom import minidom
 
-from eventlet import greenthread
-from eventlet import event
 from eventlet import tpool
+from eventlet import semaphore
 
 import IPy
 
@@ -55,8 +54,8 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
+#from nova import test
 from nova import utils
-#from nova.api import context
 from nova.auth import manager
 from nova.compute import instance_types
 from nova.compute import power_state
@@ -362,7 +361,7 @@ class LibvirtConnection(object):
         raise exception.APIError("resume not supported for libvirt")
 
     @exception.wrap_exception
-    def rescue(self, instance):
+    def rescue(self, instance, callback=None):
         self.destroy(instance, False)
 
         xml = self.to_xml(instance, rescue=True)
@@ -392,7 +391,7 @@ class LibvirtConnection(object):
         return timer.start(interval=0.5, now=True)
 
     @exception.wrap_exception
-    def unrescue(self, instance):
+    def unrescue(self, instance, callback=None):
         # NOTE(vish): Because reboot destroys and recreates an instance using
         #             the normal xml file, we can just call reboot here
         self.reboot(instance)
@@ -438,8 +437,10 @@ class LibvirtConnection(object):
 
         if virsh_output.startswith('/dev/'):
             LOG.info(_("cool, it's a device"))
-            out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
-                                     virsh_output, check_exit_code=False)
+            out, err = utils.execute('sudo', 'dd',
+                                     "if=%s" % virsh_output,
+                                     'iflag=nonblock',
+                                     check_exit_code=False)
             return out
         else:
             return ''
@@ -461,11 +462,11 @@ class LibvirtConnection(object):
         console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                    'console.log')
 
-        utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+        utils.execute('sudo', 'chown', os.getuid(), console_log)
 
         if FLAGS.libvirt_type == 'xen':
             # Xen is special
-            virsh_output = utils.execute("virsh ttyconsole %s" %
-                                         instance['name'])
+            virsh_output = utils.execute('virsh', 'ttyconsole',
                                          instance['name'])
             data = self._flush_xen_console(virsh_output)
             fpath = self._append_to_file(data, console_log)
@@ -482,9 +483,10 @@ class LibvirtConnection(object):
             port = random.randint(int(start_port), int(end_port))
             # netcat will exit with 0 only if the port is in use,
             # so a nonzero return value implies it is unused
-            cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
-            stdout, stderr = utils.execute(cmd)
-            if stdout.strip() == 'free':
+            cmd = 'netcat', '0.0.0.0', port, '-w', '1'
+            try:
+                stdout, stderr = utils.execute(*cmd, process_input='')
+            except exception.ProcessExecutionError:
                 return port
         raise Exception(_('Unable to find an open port'))
 
@@ -511,7 +513,10 @@ class LibvirtConnection(object):
         subprocess.Popen(cmd, shell=True)
         return {'token': token, 'host': host, 'port': port}
 
-    def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs):
+    _image_sems = {}
+
+    @staticmethod
+    def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
         """Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a @@ -530,14 +535,21 @@ class LibvirtConnection(object): if not os.path.exists(base_dir): os.mkdir(base_dir) base = os.path.join(base_dir, fname) - if not os.path.exists(base): - fn(target=base, *args, **kwargs) + + if fname not in LibvirtConnection._image_sems: + LibvirtConnection._image_sems[fname] = semaphore.Semaphore() + with LibvirtConnection._image_sems[fname]: + if not os.path.exists(base): + fn(target=base, *args, **kwargs) + if not LibvirtConnection._image_sems[fname].locked(): + del LibvirtConnection._image_sems[fname] + if cow: - utils.execute('qemu-img create -f qcow2 -o ' - 'cluster_size=2M,backing_file=%s %s' - % (base, target)) + utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o', + 'cluster_size=2M,backing_file=%s' % base, + target) else: - utils.execute('cp %s %s' % (base, target)) + utils.execute('cp', base, target) def _fetch_image(self, target, image_id, user, project, size=None): """Grab image and optionally attempt to resize it""" @@ -547,7 +559,7 @@ class LibvirtConnection(object): def _create_local(self, target, local_gb): """Create a blank image of specified size""" - utils.execute('truncate %s -s %dG' % (target, local_gb)) + utils.execute('truncate', target, '-s', "%dG" % local_gb) # TODO(vish): should we format disk by default? def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): @@ -558,7 +570,7 @@ class LibvirtConnection(object): fname + suffix) # ensure directories exist and are writable - utils.execute('mkdir -p %s' % basepath(suffix='')) + utils.execute('mkdir', '-p', basepath(suffix='')) LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') @@ -578,21 +590,23 @@ class LibvirtConnection(object): 'ramdisk_id': inst['ramdisk_id']} if disk_images['kernel_id']: + fname = '%08x' % int(disk_images['kernel_id']) self._cache_image(fn=self._fetch_image, target=basepath('kernel'), - fname=disk_images['kernel_id'], + fname=fname, image_id=disk_images['kernel_id'], user=user, project=project) if disk_images['ramdisk_id']: + fname = '%08x' % int(disk_images['ramdisk_id']) self._cache_image(fn=self._fetch_image, target=basepath('ramdisk'), - fname=disk_images['ramdisk_id'], + fname=fname, image_id=disk_images['ramdisk_id'], user=user, project=project) - root_fname = disk_images['image_id'] + root_fname = '%08x' % int(disk_images['image_id']) size = FLAGS.minimum_root_size if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue': size = None @@ -606,7 +620,7 @@ class LibvirtConnection(object): user=user, project=project, size=size) - type_data = instance_types.INSTANCE_TYPES[inst['instance_type']] + type_data = instance_types.get_instance_type(inst['instance_type']) if type_data['local_gb']: self._cache_image(fn=self._create_local, @@ -658,7 +672,7 @@ class LibvirtConnection(object): ' data into image %(img_id)s (%(e)s)') % locals()) if FLAGS.libvirt_type == 'uml': - utils.execute('sudo chown root %s' % basepath('disk')) + utils.execute('sudo', 'chown', 'root', basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? 
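The _image_sems bookkeeping in _cache_image() above is easy to misread: there is one semaphore per base-image filename, discarded again once no caller holds it, so concurrent requests for the same base image download it only once while distinct images proceed in parallel. A condensed sketch of the same pattern (names hypothetical):

    import os

    from eventlet import semaphore

    _sems = {}

    def fetch_once(fname, target, fetch_fn):
        # Serialize concurrent fetches of the same base image; distinct
        # images are guarded by distinct semaphores and run in parallel.
        if fname not in _sems:
            _sems[fname] = semaphore.Semaphore()
        with _sems[fname]:
            if not os.path.exists(target):
                fetch_fn(target=target)
        # Drop the semaphore once nobody is waiting on it.
        if not _sems[fname].locked():
            del _sems[fname]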
@@ -667,7 +681,8 @@ class LibvirtConnection(object): instance['id']) # FIXME(vish): stick this in db instance_type = instance['instance_type'] - instance_type = instance_types.INSTANCE_TYPES[instance_type] + # instance_type = test.INSTANCE_TYPES[instance_type] + instance_type = instance_types.get_instance_type(instance_type) ip_address = db.instance_get_fixed_address(context.get_admin_context(), instance['id']) # Assume that the gateway also acts as the dhcp server. @@ -1206,10 +1221,14 @@ class NWFilterFirewall(FirewallDriver): class IptablesFirewallDriver(FirewallDriver): def __init__(self, execute=None, **kwargs): - self.execute = execute or utils.execute + from nova.network import linux_net + self.iptables = linux_net.iptables_manager self.instances = {} self.nwfilter = NWFilterFirewall(kwargs['get_connection']) + self.iptables.ipv4['filter'].add_chain('sg-fallback') + self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') + def setup_basic_filtering(self, instance): """Use NWFilter from libvirt for this.""" return self.nwfilter.setup_basic_filtering(instance) @@ -1218,126 +1237,97 @@ class IptablesFirewallDriver(FirewallDriver): """No-op. Everything is done in prepare_instance_filter""" pass - def remove_instance(self, instance): + def unfilter_instance(self, instance): if instance['id'] in self.instances: del self.instances[instance['id']] + self.remove_filters_for_instance(instance) + self.iptables.apply() else: LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) - def add_instance(self, instance): + def prepare_instance_filter(self, instance): self.instances[instance['id']] = instance + self.add_filters_for_instance(instance) + self.iptables.apply() - def unfilter_instance(self, instance): - self.remove_instance(instance) - self.apply_ruleset() + def add_filters_for_instance(self, instance): + chain_name = self._instance_chain_name(instance) - def prepare_instance_filter(self, instance): - self.add_instance(instance) - self.apply_ruleset() - - def apply_ruleset(self): - current_filter, _ = self.execute('sudo iptables-save -t filter') - current_lines = current_filter.split('\n') - new_filter = self.modify_rules(current_lines, 4) - self.execute('sudo iptables-restore', - process_input='\n'.join(new_filter)) - if(FLAGS.use_ipv6): - current_filter, _ = self.execute('sudo ip6tables-save -t filter') - current_lines = current_filter.split('\n') - new_filter = self.modify_rules(current_lines, 6) - self.execute('sudo ip6tables-restore', - process_input='\n'.join(new_filter)) + self.iptables.ipv4['filter'].add_chain(chain_name) + ipv4_address = self._ip_for_instance(instance) + self.iptables.ipv4['filter'].add_rule('local', + '-d %s -j $%s' % + (ipv4_address, chain_name)) + + if FLAGS.use_ipv6: + self.iptables.ipv6['filter'].add_chain(chain_name) + ipv6_address = self._ip_for_instance_v6(instance) + self.iptables.ipv6['filter'].add_rule('local', + '-d %s -j $%s' % + (ipv6_address, + chain_name)) + + ipv4_rules, ipv6_rules = self.instance_rules(instance) + + for rule in ipv4_rules: + self.iptables.ipv4['filter'].add_rule(chain_name, rule) + + if FLAGS.use_ipv6: + for rule in ipv6_rules: + self.iptables.ipv6['filter'].add_rule(chain_name, rule) + + def remove_filters_for_instance(self, instance): + chain_name = self._instance_chain_name(instance) + + self.iptables.ipv4['filter'].remove_chain(chain_name) + if FLAGS.use_ipv6: + self.iptables.ipv6['filter'].remove_chain(chain_name) - def modify_rules(self, current_lines, ip_version=4): + def 
instance_rules(self, instance): ctxt = context.get_admin_context() - # Remove any trace of nova rules. - new_filter = filter(lambda l: 'nova-' not in l, current_lines) - - seen_chains = False - for rules_index in range(len(new_filter)): - if not seen_chains: - if new_filter[rules_index].startswith(':'): - seen_chains = True - elif seen_chains == 1: - if not new_filter[rules_index].startswith(':'): - break - our_chains = [':nova-fallback - [0:0]'] - our_rules = ['-A nova-fallback -j DROP'] - - our_chains += [':nova-local - [0:0]'] - our_rules += ['-A FORWARD -j nova-local'] - our_rules += ['-A OUTPUT -j nova-local'] - - security_groups = {} - # Add our chains - # First, we add instance chains and rules - for instance_id in self.instances: - instance = self.instances[instance_id] - chain_name = self._instance_chain_name(instance) - if(ip_version == 4): - ip_address = self._ip_for_instance(instance) - elif(ip_version == 6): - ip_address = self._ip_for_instance_v6(instance) - - our_chains += [':%s - [0:0]' % chain_name] - - # Jump to the per-instance chain - our_rules += ['-A nova-local -d %s -j %s' % (ip_address, - chain_name)] - - # Always drop invalid packets - our_rules += ['-A %s -m state --state ' - 'INVALID -j DROP' % (chain_name,)] - - # Allow established connections - our_rules += ['-A %s -m state --state ' - 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)] - - # Jump to each security group chain in turn - for security_group in \ - db.security_group_get_by_instance(ctxt, - instance['id']): - security_groups[security_group['id']] = security_group - - sg_chain_name = self._security_group_chain_name( - security_group['id']) + ipv4_rules = [] + ipv6_rules = [] - our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)] - - if(ip_version == 4): - # Allow DHCP responses - dhcp_server = self._dhcp_server_for_instance(instance) - our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 ' - '-j ACCEPT ' % (chain_name, dhcp_server)] - #Allow project network traffic - if (FLAGS.allow_project_net_traffic): - cidr = self._project_cidr_for_instance(instance) - our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)] - elif(ip_version == 6): - # Allow RA responses - ra_server = self._ra_server_for_instance(instance) - if ra_server: - our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' % - (chain_name, ra_server + "/128")] - #Allow project network traffic - if (FLAGS.allow_project_net_traffic): - cidrv6 = self._project_cidrv6_for_instance(instance) - our_rules += ['-A %s -s %s -j ACCEPT' % - (chain_name, cidrv6)] - - # If nothing matches, jump to the fallback chain - our_rules += ['-A %s -j nova-fallback' % (chain_name,)] + # Always drop invalid packets + ipv4_rules += ['-m state --state ' 'INVALID -j DROP'] + ipv6_rules += ['-m state --state ' 'INVALID -j DROP'] - # then, security group chains and rules - for security_group_id in security_groups: - chain_name = self._security_group_chain_name(security_group_id) - our_chains += [':%s - [0:0]' % chain_name] + # Allow established connections + ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] + ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] - rules = \ - db.security_group_rule_get_by_security_group(ctxt, - security_group_id) + dhcp_server = self._dhcp_server_for_instance(instance) + ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 ' + '-j ACCEPT' % (dhcp_server,)] + + #Allow project network traffic + if FLAGS.allow_project_net_traffic: + cidr = self._project_cidr_for_instance(instance) + ipv4_rules += ['-s %s -j 
ACCEPT' % (cidr,)]
+
+        # We wrap these in FLAGS.use_ipv6 because they might cause
+        # a DB lookup. The other ones are just list operations, so
+        # they're not worth the clutter.
+        if FLAGS.use_ipv6:
+            # Allow RA responses
+            ra_server = self._ra_server_for_instance(instance)
+            if ra_server:
+                ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % (ra_server,)]
+
+            #Allow project network traffic
+            if FLAGS.allow_project_net_traffic:
+                cidrv6 = self._project_cidrv6_for_instance(instance)
+                ipv6_rules += ['-s %s -j ACCEPT' % (cidrv6,)]
+
+        security_groups = db.security_group_get_by_instance(ctxt,
+                                                            instance['id'])
+
+        # then, security group chains and rules
+        for security_group in security_groups:
+            rules = db.security_group_rule_get_by_security_group(ctxt,
+                                                        security_group['id'])
 
             for rule in rules:
                 logging.info('%r', rule)
@@ -1348,14 +1338,16 @@ class IptablesFirewallDriver(FirewallDriver):
                     continue
 
                 version = _get_ip_version(rule.cidr)
-                if version != ip_version:
-                    continue
+                if version == 4:
+                    fw_rules = ipv4_rules
+                else:
+                    fw_rules = ipv6_rules
 
                 protocol = rule.protocol
                 if version == 6 and rule.protocol == 'icmp':
                     protocol = 'icmpv6'
 
-                args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
+                args = ['-p', protocol, '-s', rule.cidr]
 
                 if rule.protocol in ['udp', 'tcp']:
                     if rule.from_port == rule.to_port:
@@ -1376,32 +1368,39 @@ class IptablesFirewallDriver(FirewallDriver):
                         icmp_type_arg += '/%s' % icmp_code
 
                     if icmp_type_arg:
-                        if(ip_version == 4):
+                        if version == 4:
                             args += ['-m', 'icmp', '--icmp-type',
                                      icmp_type_arg]
-                        elif(ip_version == 6):
+                        elif version == 6:
                             args += ['-m', 'icmp6', '--icmpv6-type',
                                      icmp_type_arg]
 
                 args += ['-j ACCEPT']
-                our_rules += [' '.join(args)]
+                fw_rules += [' '.join(args)]
+
+        ipv4_rules += ['-j $sg-fallback']
+        ipv6_rules += ['-j $sg-fallback']
 
-        new_filter[rules_index:rules_index] = our_rules
-        new_filter[rules_index:rules_index] = our_chains
-        logging.info('new_filter: %s', '\n'.join(new_filter))
-        return new_filter
+        return ipv4_rules, ipv6_rules
 
     def refresh_security_group_members(self, security_group):
         pass
 
     def refresh_security_group_rules(self, security_group):
-        self.apply_ruleset()
+        for instance in self.instances.values():
+            # We use the semaphore to make sure no one applies the rule set
+            # after we've yanked the existing rules but before we've put in
+            # the new ones.
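+            # Sketch, with hypothetical values: by this point
+            # instance_rules() has produced plain rule strings, e.g. for an
+            # instance whose DHCP server is 10.0.0.1:
+            #     ipv4_rules = [
+            #         '-m state --state INVALID -j DROP',
+            #         '-m state --state ESTABLISHED,RELATED -j ACCEPT',
+            #         '-s 10.0.0.1 -p udp --sport 67 --dport 68 -j ACCEPT',
+            #         '-j $sg-fallback',
+            #     ]
+            # and names written as '$chain' are expanded by
+            # linux_net.iptables_manager into its wrapped chain names when
+            # the rules are applied.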
+ with self.iptables.semaphore: + self.remove_filters_for_instance(instance) + self.add_filters_for_instance(instance) + self.iptables.apply() def _security_group_chain_name(self, security_group_id): return 'nova-sg-%s' % (security_group_id,) def _instance_chain_name(self, instance): - return 'nova-inst-%s' % (instance['id'],) + return 'inst-%s' % (instance['id'],) def _ip_for_instance(self, instance): return db.instance_get_fixed_address(context.get_admin_context(), diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index e8352771c..ba12d4d3a 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -286,6 +286,13 @@ class SessionBase(object): rec['currently_attached'] = False rec['device'] = '' + def host_compute_free_memory(self, _1, ref): + #Always return 12GB available + return 12 * 1024 * 1024 * 1024 + + def host_call_plugin(*args): + return 'herp' + def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) @@ -397,7 +404,7 @@ class SessionBase(object): field in _db_content[cls][ref]): return _db_content[cls][ref][field] - LOG.debuug(_('Raising NotImplemented')) + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( _('xenapi.fake does not have an implementation for %s or it has ' 'been called with the wrong number of arguments') % name) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4bbd522c1..4e6c71446 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -24,6 +24,7 @@ import pickle import re import time import urllib +import uuid from xml.dom import minidom from eventlet import event @@ -63,11 +64,14 @@ class ImageType: 0 - kernel/ramdisk image (goes on dom0's filesystem) 1 - disk image (local SR, partitioned by objectstore plugin) 2 - raw disk image (local SR, NOT partitioned by plugin) + 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for + linux, HVM assumed for Windows) """ KERNEL_RAMDISK = 0 DISK = 1 DISK_RAW = 2 + DISK_VHD = 3 class VMHelper(HelperBase): @@ -82,7 +86,8 @@ class VMHelper(HelperBase): the pv_kernel flag indicates whether the guest is HVM or PV """ - instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] + instance_type = instance_types.\ + get_instance_type(instance.instance_type) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) rec = { @@ -139,6 +144,17 @@ class VMHelper(HelperBase): return vm_ref @classmethod + def ensure_free_mem(cls, session, instance): + instance_type = instance_types.get_instance_type( + instance.instance_type) + mem = long(instance_type['memory_mb']) * 1024 * 1024 + #get free memory from host + host = session.get_xenapi_host() + host_free_mem = long(session.get_xenapi().host. + compute_free_memory(host)) + return host_free_mem >= mem + + @classmethod def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. 
Returns a Deferred that gives the new VBD reference.""" @@ -191,19 +207,17 @@ class VMHelper(HelperBase): """Destroy VBD from host database""" try: task = session.call_xenapi('Async.VBD.destroy', vbd_ref) - #FIXME(armando): find a solution to missing instance_id - #with Josh Kearney - session.wait_for_task(0, task) + session.wait_for_task(task) except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('Unable to destroy VBD %s') % vbd_ref) @classmethod - def create_vif(cls, session, vm_ref, network_ref, mac_address): + def create_vif(cls, session, vm_ref, network_ref, mac_address, dev="0"): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" vif_rec = {} - vif_rec['device'] = '0' + vif_rec['device'] = dev vif_rec['network'] = network_ref vif_rec['VM'] = vm_ref vif_rec['MAC'] = mac_address @@ -239,24 +253,40 @@ class VMHelper(HelperBase): return vdi_ref @classmethod + def get_vdi_for_vm_safely(cls, session, vm_ref): + vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref) + if vdi_refs is None: + raise Exception(_("No VDIs found for VM %s") % vm_ref) + else: + num_vdis = len(vdi_refs) + if num_vdis != 1: + raise Exception( + _("Unexpected number of VDIs (%(num_vdis)s) found" + " for VM %(vm_ref)s") % locals()) + + vdi_ref = vdi_refs[0] + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + return vdi_ref, vdi_rec + + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): - """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, - Snapshot VHD - """ + """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, + Snapshot VHD""" #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...") % locals()) - vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] sr_ref = vm_vdi_rec["SR"] original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref) task = session.call_xenapi('Async.VM.snapshot', vm_ref, label) - template_vm_ref = session.wait_for_task(instance_id, task) - template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] + template_vm_ref = session.wait_for_task(task, instance_id) + template_vdi_rec = cls.get_vdi_for_vm_safely(session, + template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] LOG.debug(_('Created snapshot %(template_vm_ref)s from' @@ -266,29 +296,53 @@ class VMHelper(HelperBase): session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) #TODO(sirp): we need to assert only one parent, not parents two deep - return template_vm_ref, [template_vdi_uuid, parent_uuid] + template_vdi_uuids = {'image': parent_uuid, + 'snap': template_vdi_uuid} + return template_vm_ref, template_vdi_uuids + + @classmethod + def get_sr(cls, session, sr_label='slices'): + """Finds the SR named by the given name label and returns + the UUID""" + return session.call_xenapi('SR.get_by_name_label', sr_label)[0] + + @classmethod + def get_sr_path(cls, session): + """Return the path to our storage repository + + This is used when we're dealing with VHDs directly, either by taking + snapshots or by restoring an image in the DISK_VHD format. 
+ """ + sr_ref = safe_find_sr(session) + sr_rec = session.get_xenapi().SR.get_record(sr_ref) + sr_uuid = sr_rec["uuid"] + return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) @classmethod def upload_image(cls, session, instance_id, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. """ + # NOTE(sirp): Currently we only support uploading images as VHD, there + # is no RAW equivalent (yet) logging.debug(_("Asking xapi to upload %(vdi_uuids)s as" " ID %(image_id)s") % locals()) params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, 'glance_host': FLAGS.glance_host, - 'glance_port': FLAGS.glance_port} + 'glance_port': FLAGS.glance_port, + 'sr_path': cls.get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} - task = session.async_call_plugin('glance', 'put_vdis', kwargs) - session.wait_for_task(instance_id, task) + task = session.async_call_plugin('glance', 'upload_vhd', kwargs) + session.wait_for_task(task, instance_id) @classmethod - def fetch_image(cls, session, instance_id, image, user, project, type): + def fetch_image(cls, session, instance_id, image, user, project, + image_type): """ - type is interpreted as an ImageType instance + image_type is interpreted as an ImageType instance Related flags: xenapi_image_service = ['glance', 'objectstore'] glance_address = 'address for glance services' @@ -298,35 +352,80 @@ class VMHelper(HelperBase): if FLAGS.xenapi_image_service == 'glance': return cls._fetch_image_glance(session, instance_id, image, - access, type) + access, image_type) else: return cls._fetch_image_objectstore(session, instance_id, image, - access, user.secret, type) + access, user.secret, + image_type) + + @classmethod + def _fetch_image_glance_vhd(cls, session, instance_id, image, access, + image_type): + LOG.debug(_("Asking xapi to fetch vhd image %(image)s") + % locals()) + + sr_ref = safe_find_sr(session) + + # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not + # have the `uuid` module. To work around this, we generate the uuids + # here (under Python 2.6+) and pass them as arguments + uuid_stack = [str(uuid.uuid4()) for i in xrange(2)] + + params = {'image_id': image, + 'glance_host': FLAGS.glance_host, + 'glance_port': FLAGS.glance_port, + 'uuid_stack': uuid_stack, + 'sr_path': cls.get_sr_path(session)} + + kwargs = {'params': pickle.dumps(params)} + task = session.async_call_plugin('glance', 'download_vhd', kwargs) + vdi_uuid = session.wait_for_task(task, instance_id) + + cls.scan_sr(session, instance_id, sr_ref) + + # Set the name-label to ease debugging + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + name_label = get_name_label_for_image(image) + session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) + + LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") + % locals()) + return vdi_uuid @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, type): - sr = find_sr(session) - if sr is None: - raise exception.NotFound('Cannot find SR to write VDI to') + def _fetch_image_glance_disk(cls, session, instance_id, image, access, + image_type): + """Fetch the image from Glance + + NOTE: + Unlike _fetch_image_glance_vhd, this method does not use the Glance + plugin; instead, it streams the disks through domU to the VDI + directly. 
- c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + """ + # FIXME(sirp): Since the Glance plugin seems to be required for the + # VHD disk, it may be worth using the plugin for both VHD and RAW and + # DISK restores + sr_ref = safe_find_sr(session) - meta, image_file = c.get_image(image) + client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + meta, image_file = client.get_image(image) virtual_size = int(meta['size']) vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) - if type == ImageType.DISK: + + if image_type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES - vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, - vdi_size, False) + name_label = get_name_label_for_image(image) + vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) with_vdi_attached_here(session, vdi, False, lambda dev: - _stream_disk(dev, type, + _stream_disk(dev, image_type, virtual_size, image_file)) - if (type == ImageType.KERNEL_RAMDISK): + if image_type == ImageType.KERNEL_RAMDISK: #we need to invoke a plugin for copying VDI's #content into proper path LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) @@ -336,7 +435,7 @@ class VMHelper(HelperBase): #let the plugin copy the correct number of bytes args['image-size'] = str(vdi_size) task = session.async_call_plugin('glance', fn, args) - filename = session.wait_for_task(instance_id, task) + filename = session.wait_for_task(task, instance_id) #remove the VDI as it is not needed anymore session.get_xenapi().VDI.destroy(vdi) LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) @@ -345,27 +444,99 @@ class VMHelper(HelperBase): return session.get_xenapi().VDI.get_uuid(vdi) @classmethod + def determine_disk_image_type(cls, instance): + """Disk Image Types are used to determine where the kernel will reside + within an image. To figure out which type we're dealing with, we use + the following rules: + + 1. If we're using Glance, we can use the image_type field to + determine the image_type + + 2. If we're not using Glance, then we need to deduce this based on + whether a kernel_id is specified. + """ + def log_disk_format(image_type): + pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK', + ImageType.DISK: 'DISK', + ImageType.DISK_RAW: 'DISK_RAW', + ImageType.DISK_VHD: 'DISK_VHD'} + disk_format = pretty_format[image_type] + image_id = instance.image_id + instance_id = instance.id + LOG.debug(_("Detected %(disk_format)s format for image " + "%(image_id)s, instance %(instance_id)s") % locals()) + + def determine_from_glance(): + glance_disk_format2nova_type = { + 'ami': ImageType.DISK, + 'aki': ImageType.KERNEL_RAMDISK, + 'ari': ImageType.KERNEL_RAMDISK, + 'raw': ImageType.DISK_RAW, + 'vhd': ImageType.DISK_VHD} + client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + meta = client.get_image_meta(instance.image_id) + disk_format = meta['disk_format'] + try: + return glance_disk_format2nova_type[disk_format] + except KeyError: + raise exception.NotFound( + _("Unrecognized disk_format '%(disk_format)s'") + % locals()) + + def determine_from_instance(): + if instance.kernel_id: + return ImageType.DISK + else: + return ImageType.DISK_RAW + + # FIXME(sirp): can we unify the ImageService and xenapi_image_service + # abstractions? 
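+        # Sketch, with hypothetical metadata: the Glance branch reduces to
+        # a dictionary lookup from the image's disk_format to an ImageType
+        # constant, e.g.
+        #     meta = {'disk_format': 'vhd'}
+        #     glance_disk_format2nova_type[meta['disk_format']]
+        #         == ImageType.DISK_VHD
+        # with unrecognized formats raising NotFound, as above.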
+ if FLAGS.xenapi_image_service == 'glance': + image_type = determine_from_glance() + else: + image_type = determine_from_instance() + + log_disk_format(image_type) + return image_type + + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, + image_type): + if image_type == ImageType.DISK_VHD: + return cls._fetch_image_glance_vhd( + session, instance_id, image, access, image_type) + else: + return cls._fetch_image_glance_disk( + session, instance_id, image, access, image_type) + + @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, - secret, type): + secret, image_type): url = images.image_url(image) LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) - fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' + if image_type == ImageType.KERNEL_RAMDISK: + fn = 'get_kernel' + else: + fn = 'get_vdi' args = {} args['src_url'] = url args['username'] = access args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' - if type != ImageType.KERNEL_RAMDISK: + if image_type != ImageType.KERNEL_RAMDISK: args['add_partition'] = 'true' - if type == ImageType.DISK_RAW: + if image_type == ImageType.DISK_RAW: args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) - uuid = session.wait_for_task(instance_id, task) + uuid = session.wait_for_task(task, instance_id) return uuid @classmethod def lookup_image(cls, session, instance_id, vdi_ref): + """ + Determine if VDI is using a PV kernel + """ if FLAGS.xenapi_image_service == 'glance': return cls._lookup_image_glance(session, vdi_ref) else: @@ -378,31 +549,19 @@ class VMHelper(HelperBase): args = {} args['vdi-ref'] = vdi_ref task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(instance_id, task) + pv_str = session.wait_for_task(task, instance_id) pv = None if pv_str.lower() == 'true': pv = True elif pv_str.lower() == 'false': pv = False - LOG.debug(_("PV Kernel in VDI:%d"), pv) + LOG.debug(_("PV Kernel in VDI:%s"), pv) return pv @classmethod def _lookup_image_glance(cls, session, vdi_ref): LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) - - def is_vdi_pv(dev): - LOG.debug(_("Running pygrub against %s"), dev) - output = os.popen('pygrub -qn /dev/%s' % dev) - for line in output.readlines(): - #try to find kernel string - m = re.search('(?<=kernel:)/.*(?:>)', line) - if m and m.group(0).find('xen') != -1: - LOG.debug(_("Found Xen kernel %s") % m.group(0)) - return True - LOG.debug(_("No Xen kernel found. 
Booting HVM.")) - return False - return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) + return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) @classmethod def lookup(cls, session, i): @@ -440,6 +599,14 @@ class VMHelper(HelperBase): return None @classmethod + def lookup_kernel_ramdisk(cls, session, vm): + vm_rec = session.get_xenapi().VM.get_record(vm) + if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec: + return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk']) + else: + return (None, None) + + @classmethod def compile_info(cls, record): """Fill record with VM status information""" LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"), @@ -478,6 +645,21 @@ class VMHelper(HelperBase): except cls.XenAPI.Failure as e: return {"Unable to retrieve diagnostics": e} + @classmethod + def scan_sr(cls, session, instance_id=None, sr_ref=None): + """Scans the SR specified by sr_ref""" + if sr_ref: + LOG.debug(_("Re-scanning SR %s"), sr_ref) + task = session.call_xenapi('Async.SR.scan', sr_ref) + session.wait_for_task(task, instance_id) + + @classmethod + def scan_default_sr(cls, session): + """Looks for the system default SR and triggers a re-scan""" + #FIXME(sirp/mdietz): refactor scan_default_sr in there + sr_ref = cls.get_sr(session) + session.call_xenapi('SR.scan', sr_ref) + def get_rrd(host, uuid): """Return the VM RRD XML as a string""" @@ -520,12 +702,6 @@ def get_vhd_parent_uuid(session, vdi_ref): return None -def scan_sr(session, instance_id, sr_ref): - LOG.debug(_("Re-scanning SR %s"), sr_ref) - task = session.call_xenapi('Async.SR.scan', sr_ref) - session.wait_for_task(instance_id, task) - - def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): """ Spin until the parent VHD is coalesced into its parent VHD @@ -550,7 +726,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, " %(max_attempts)d), giving up...") % locals()) raise exception.Error(msg) - scan_sr(session, instance_id, sr_ref) + VMHelper.scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent" @@ -581,7 +757,18 @@ def get_vdi_for_vm_safely(session, vm_ref): return vdi_ref, vdi_rec +def safe_find_sr(session): + """Same as find_sr except raises a NotFound exception if SR cannot be + determined + """ + sr_ref = find_sr(session) + if sr_ref is None: + raise exception.NotFound(_('Cannot find SR to read/write VDI')) + return sr_ref + + def find_sr(session): + """Return the storage repository to hold VM images""" host = session.get_xenapi_host() srs = session.get_xenapi().SR.get_all() for sr in srs: @@ -696,9 +883,22 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev, type, virtual_size, image_file): +def _is_vdi_pv(dev): + LOG.debug(_("Running pygrub against %s"), dev) + output = os.popen('pygrub -qn /dev/%s' % dev) + for line in output.readlines(): + #try to find kernel string + m = re.search('(?<=kernel:)/.*(?:>)', line) + if m and m.group(0).find('xen') != -1: + LOG.debug(_("Found Xen kernel %s") % m.group(0)) + return True + LOG.debug(_("No Xen kernel found. 
Booting HVM.")) + return False + + +def _stream_disk(dev, image_type, virtual_size, image_file): offset = 0 - if type == ImageType.DISK: + if image_type == ImageType.DISK: offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) @@ -717,13 +917,17 @@ def _write_partition(virtual_size, dev): LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d' ' to %(dest)s...') % locals()) - def execute(cmd, process_input=None, check_exit_code=True): - return utils.execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + def execute(*cmd, **kwargs): + return utils.execute(*cmd, **kwargs) - execute('parted --script %s mklabel msdos' % dest) - execute('parted --script %s mkpart primary %ds %ds' % - (dest, primary_first, primary_last)) + execute('parted', '--script', dest, 'mklabel', 'msdos') + execute('parted', '--script', dest, 'mkpart', 'primary', + '%ds' % primary_first, + '%ds' % primary_last) LOG.debug(_('Writing partition table %s done.'), dest) + + +def get_name_label_for_image(image): + # TODO(sirp): This should eventually be the URI for the Glance image + return _('Glance image %s') % image diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e84ce20c4..562ecd4d5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -22,6 +22,7 @@ Management class for VM-related functions (spawn, reboot, etc). import json import M2Crypto import os +import pickle import subprocess import tempfile import uuid @@ -49,6 +50,7 @@ class VMOps(object): def __init__(self, session): self.XenAPI = session.get_imported_xenapi() self._session = session + VMHelper.XenAPI = self.XenAPI def list_instances(self): @@ -60,112 +62,185 @@ class VMOps(object): vms.append(rec["name_label"]) return vms + def _start(self, instance, vm_ref=None): + """Power on a VM instance""" + if not vm_ref: + vm_ref = VMHelper.lookup(self._session, instance.name) + if vm_ref is None: + raise exception(_('Attempted to power on non-existent instance' + ' bad instance id %s') % instance.id) + LOG.debug(_("Starting instance %s"), instance.name) + self._session.call_xenapi('VM.start', vm_ref, False, False) + + def create_disk(self, instance): + user = AuthManager().get_user(instance.user_id) + project = AuthManager().get_project(instance.project_id) + disk_image_type = VMHelper.determine_disk_image_type(instance) + vdi_uuid = VMHelper.fetch_image(self._session, instance.id, + instance.image_id, user, project, disk_image_type) + return vdi_uuid + def spawn(self, instance): + vdi_uuid = self.create_disk(instance) + self._spawn_with_disk(instance, vdi_uuid=vdi_uuid) + + def _spawn_with_disk(self, instance, vdi_uuid): """Create VM instance""" - vm = VMHelper.lookup(self._session, instance.name) + instance_name = instance.name + vm = VMHelper.lookup(self._session, instance_name) if vm is not None: raise exception.Duplicate(_('Attempted to create' - ' non-unique name %s') % instance.name) - - bridge = db.network_get_by_instance(context.get_admin_context(), - instance['id'])['bridge'] - network_ref = \ - NetworkHelper.find_network_with_bridge(self._session, bridge) + ' non-unique name %s') % instance_name) + + #ensure enough free memory is available + if not VMHelper.ensure_free_mem(self._session, instance): + LOG.exception(_('instance %(instance_name)s: not enough free ' + 'memory') % locals()) + db.instance_set_state(context.get_admin_context(), + instance['id'], + power_state.SHUTDOWN) + return user = AuthManager().get_user(instance.user_id) project = 
-        #if kernel is not present we must download a raw disk
-        if instance.kernel_id:
-            disk_image_type = ImageType.DISK
-        else:
-            disk_image_type = ImageType.DISK_RAW
-        vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
-                instance.image_id, user, project, disk_image_type)
+
+        kernel = ramdisk = pv_kernel = None
+
+        # Are we building from a pre-existing disk?
         vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+
-        #Have a look at the VDI and see if it has a PV kernel
-        pv_kernel = False
-        if not instance.kernel_id:
+        disk_image_type = VMHelper.determine_disk_image_type(instance)
+        if disk_image_type == ImageType.DISK_RAW:
+            # Have a look at the VDI and see if it has a PV kernel
             pv_kernel = VMHelper.lookup_image(self._session, instance.id,
                                               vdi_ref)
-        kernel = None
+        elif disk_image_type == ImageType.DISK_VHD:
+            # TODO(sirp): Assuming PV for now; this will need to be
+            # configurable as Windows will use HVM.
+            pv_kernel = True
+
         if instance.kernel_id:
             kernel = VMHelper.fetch_image(self._session, instance.id,
                 instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
-        ramdisk = None
+
         if instance.ramdisk_id:
             ramdisk = VMHelper.fetch_image(self._session, instance.id,
                 instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+
         vm_ref = VMHelper.create_vm(self._session,
                                     instance, kernel, ramdisk, pv_kernel)
-        VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+        VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+                            vdi_ref=vdi_ref, userdevice=0, bootable=True)
+
+        # inject_network_info and create vifs
+        networks = self.inject_network_info(instance)
+        self.create_vifs(instance, networks)
 
-        if network_ref:
-            VMHelper.create_vif(self._session, vm_ref,
-                                network_ref, instance.mac_address)
         LOG.debug(_('Starting VM %s...'), vm_ref)
-        self._session.call_xenapi('VM.start', vm_ref, False, False)
-        instance_name = instance.name
+        self._start(instance, vm_ref)
         LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
-                % locals())
-
+                 % locals())
+
+        def _inject_onset_files():
+            onset_files = instance.onset_files
+            if onset_files:
+                # Check if this is a JSON-encoded string and convert if needed.
+                if isinstance(onset_files, basestring):
+                    try:
+                        onset_files = json.loads(onset_files)
+                    except ValueError:
+                        LOG.exception(_("Invalid value for onset_files: '%s'")
+                                      % onset_files)
+                        onset_files = []
+                # Inject any files, if specified
+                for path, contents in onset_files:
+                    LOG.debug(_("Injecting file path: '%s'") % path)
+                    self.inject_file(instance, path, contents)
 
         # NOTE(armando): Do we really need to do this in virt?
+ # NOTE(tr3buchet): not sure but wherever we do it, we need to call + # reset_network afterwards timer = utils.LoopingCall(f=None) def _wait_for_boot(): try: - state = self.get_info(instance['name'])['state'] + state = self.get_info(instance_name)['state'] db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - LOG.debug(_('Instance %s: booted'), instance['name']) + LOG.debug(_('Instance %s: booted'), instance_name) timer.stop() + _inject_onset_files() + return True except Exception, exc: LOG.warn(exc) LOG.exception(_('instance %s: failed to boot'), - instance['name']) + instance_name) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() + return False timer.f = _wait_for_boot + + # call to reset network to configure network from xenstore + self.reset_network(instance) + return timer.start(interval=0.5, now=True) def _get_vm_opaque_ref(self, instance_or_vm): """Refactored out the common code of many methods that receive either a vm name or a vm instance, and want a vm instance in return. """ - vm = None - try: - if instance_or_vm.startswith("OpaqueRef:"): - # Got passed an opaque ref; return it + # if instance_or_vm is a string it must be opaque ref or instance name + if isinstance(instance_or_vm, basestring): + obj = None + try: + # check for opaque ref + obj = self._session.get_xenapi().VM.get_record(instance_or_vm) return instance_or_vm - else: - # Must be the instance name + except self.XenAPI.Failure: + # wasn't an opaque ref, must be an instance name + instance_name = instance_or_vm + + # if instance_or_vm is an int/long it must be instance id + elif isinstance(instance_or_vm, (int, long)): + ctx = context.get_admin_context() + try: + instance_obj = db.instance_get(ctx, instance_or_vm) + instance_name = instance_obj.name + except exception.NotFound: + # The unit tests screw this up, as they use an integer for + # the vm name. I'd fix that up, but that's a matter for + # another bug report. So for now, just try with the passed + # value instance_name = instance_or_vm - except (AttributeError, KeyError): - # Note the the KeyError will only happen with fakes.py - # Not a string; must be an ID or a vm instance - if isinstance(instance_or_vm, (int, long)): - ctx = context.get_admin_context() - try: - instance_obj = db.instance_get(ctx, instance_or_vm) - instance_name = instance_obj.name - except exception.NotFound: - # The unit tests screw this up, as they use an integer for - # the vm name. I'd fix that up, but that's a matter for - # another bug report. 
So for now, just try with the passed - # value - instance_name = instance_or_vm - else: - instance_name = instance_or_vm.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise Exception(_('Instance not present %s') % instance_name) - return vm + + # otherwise instance_or_vm is an instance object + else: + instance_name = instance_or_vm.name + vm_ref = VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise exception.NotFound( + _('Instance not present %s') % instance_name) + return vm_ref + + def _acquire_bootlock(self, vm): + """Prevent an instance from booting""" + self._session.call_xenapi( + "VM.set_blocked_operations", + vm, + {"start": ""}) + + def _release_bootlock(self, vm): + """Allow an instance to boot""" + self._session.call_xenapi( + "VM.remove_from_blocked_operations", + vm, + "start") def snapshot(self, instance, image_id): - """ Create snapshot from a running VM instance + """Create snapshot from a running VM instance :param instance: instance to be snapshotted :param image_id: id of image to upload to @@ -186,7 +261,20 @@ class VMOps(object): that will bundle the VHDs together and then push the bundle into Glance. """ + template_vm_ref = None + try: + template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) + # call plugin to ship snapshot off to glance + VMHelper.upload_image( + self._session, instance.id, template_vdi_uuids, image_id) + finally: + if template_vm_ref: + self._destroy(instance, template_vm_ref, + shutdown=False, destroy_kernel_ramdisk=False) + logging.debug(_("Finished snapshot and upload for VM %s"), instance) + + def _get_snapshot(self, instance): #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added @@ -197,25 +285,95 @@ class VMOps(object): try: template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot( self._session, instance.id, vm_ref, label) + return template_vm_ref, template_vdi_uuids except self.XenAPI.Failure, exc: logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s") % locals()) return + def migrate_disk_and_power_off(self, instance, dest): + """Copies a VHD from one host machine to another + + :param instance: the instance that owns the VHD in question + :param dest: the destination host machine + :param disk_type: values are 'primary' or 'cow' + """ + vm_ref = VMHelper.lookup(self._session, instance.name) + + # The primary VDI becomes the COW after the snapshot, and we can + # identify it via the VBD. 
The base copy is the parent_uuid returned
+        # from the snapshot creation
+
+        base_copy_uuid = cow_uuid = None
+        template_vdi_uuids = template_vm_ref = None
         try:
-            # call plugin to ship snapshot off to glance
-            VMHelper.upload_image(
-                self._session, instance.id, template_vdi_uuids, image_id)
+            # transfer the base copy
+            template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+            base_copy_uuid = template_vdi_uuids['image']
+            vdi_ref, vm_vdi_rec = \
+                    VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
+            cow_uuid = vm_vdi_rec['uuid']
+
+            params = {'host': dest,
+                      'vdi_uuid': base_copy_uuid,
+                      'instance_id': instance.id,
+                      'sr_path': VMHelper.get_sr_path(self._session)}
+
+            task = self._session.async_call_plugin('migration',
+                    'transfer_vhd', {'params': pickle.dumps(params)})
+            self._session.wait_for_task(task, instance.id)
+
+            # Now power down the instance and transfer the COW VHD
+            self._shutdown(instance, vm_ref, hard=False)
+
+            params = {'host': dest,
+                      'vdi_uuid': cow_uuid,
+                      'instance_id': instance.id,
+                      'sr_path': VMHelper.get_sr_path(self._session), }
+
+            task = self._session.async_call_plugin('migration',
+                    'transfer_vhd', {'params': pickle.dumps(params)})
+            self._session.wait_for_task(task, instance.id)
+
         finally:
-            self._destroy(instance, template_vm_ref, shutdown=False)
+            if template_vm_ref:
+                self._destroy(instance, template_vm_ref,
+                              shutdown=False, destroy_kernel_ramdisk=False)
 
-        logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+        # TODO(mdietz): we could also consider renaming these to something
+        # sensible so we don't need to blindly pass around dictionaries
+        return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+
+    def attach_disk(self, instance, base_copy_uuid, cow_uuid):
+        """Links the base copy VHD to the COW via the XAPI plugin"""
+        vm_ref = VMHelper.lookup(self._session, instance.name)
+        new_base_copy_uuid = str(uuid.uuid4())
+        new_cow_uuid = str(uuid.uuid4())
+        params = {'instance_id': instance.id,
+                  'old_base_copy_uuid': base_copy_uuid,
+                  'old_cow_uuid': cow_uuid,
+                  'new_base_copy_uuid': new_base_copy_uuid,
+                  'new_cow_uuid': new_cow_uuid,
+                  'sr_path': VMHelper.get_sr_path(self._session), }
+
+        task = self._session.async_call_plugin('migration',
+                'move_vhds_into_sr', {'params': pickle.dumps(params)})
+        self._session.wait_for_task(task, instance.id)
+
+        # Now we rescan the SR so we find the VHDs
+        VMHelper.scan_default_sr(self._session)
+
+        return new_cow_uuid
+
+    def resize(self, instance, flavor):
+        """Resize a running instance by changing its RAM and disk size."""
+        raise NotImplementedError()
 
     def reboot(self, instance):
         """Reboot VM instance"""
         vm = self._get_vm_opaque_ref(instance)
         task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
-        self._session.wait_for_task(instance.id, task)
+        self._session.wait_for_task(task, instance.id)
 
     def set_admin_password(self, instance, new_pass):
         """Set the root/admin password on the VM instance. This is done via
@@ -255,22 +413,58 @@ class VMOps(object):
             raise RuntimeError(resp_dict['message'])
         return resp_dict['message']
 
-    def _shutdown(self, instance, vm):
-        """Shutdown an instance """
+    def inject_file(self, instance, b64_path, b64_contents):
+        """Write a file to the VM instance. The path to which it is to be
+        written and the contents of the file need to be supplied; both should
+        be base64-encoded to prevent errors with non-ASCII characters being
+        transmitted. If the agent does not support file injection, or the user
+        has disabled it, a NotImplementedError will be raised.
+ """ + # Files/paths *should* be base64-encoded at this point, but + # double-check to make sure. + b64_path = utils.ensure_b64_encoding(b64_path) + b64_contents = utils.ensure_b64_encoding(b64_contents) + + # Need to uniquely identify this request. + transaction_id = str(uuid.uuid4()) + args = {'id': transaction_id, 'b64_path': b64_path, + 'b64_contents': b64_contents} + # If the agent doesn't support file injection, a NotImplementedError + # will be raised with the appropriate message. + resp = self._make_agent_call('inject_file', instance, '', args) + resp_dict = json.loads(resp) + if resp_dict['returncode'] != '0': + # There was some other sort of error; the message will contain + # a description of the error. + raise RuntimeError(resp_dict['message']) + return resp_dict['message'] + + def _shutdown(self, instance, vm, hard=True): + """Shutdown an instance""" state = self.get_info(instance['name'])['state'] if state == power_state.SHUTDOWN: LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") % locals()) return + instance_id = instance.id + LOG.debug(_("Shutting down VM for Instance %(instance_id)s") + % locals()) try: - task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - self._session.wait_for_task(instance.id, task) + task = None + if hard: + task = self._session.call_xenapi("Async.VM.hard_shutdown", vm) + else: + task = self._session.call_xenapi('Async.VM.clean_shutdown', vm) + self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure, exc: LOG.exception(exc) def _destroy_vdis(self, instance, vm): """Destroys all VDIs associated with a VM """ + instance_id = instance.id + LOG.debug(_("Destroying VDIs for Instance %(instance_id)s") + % locals()) vdis = VMHelper.lookup_vm_vdis(self._session, vm) if not vdis: @@ -279,18 +473,60 @@ class VMOps(object): for vdi in vdis: try: task = self._session.call_xenapi('Async.VDI.destroy', vdi) - self._session.wait_for_task(instance.id, task) + self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure, exc: LOG.exception(exc) + def _destroy_kernel_ramdisk(self, instance, vm): + """ + Three situations can occur: + + 1. We have neither a ramdisk nor a kernel, in which case we are a + RAW image and can omit this step + + 2. We have one or the other, in which case, we should flag as an + error + + 3. We have both, in which case we safely remove both the kernel + and the ramdisk. + """ + instance_id = instance.id + if not instance.kernel_id and not instance.ramdisk_id: + # 1. No kernel or ramdisk + LOG.debug(_("Instance %(instance_id)s using RAW or VHD, " + "skipping kernel and ramdisk deletion") % locals()) + return + + if not (instance.kernel_id and instance.ramdisk_id): + # 2. We only have kernel xor ramdisk + raise exception.NotFound( + _("Instance %(instance_id)s has a kernel or ramdisk but not " + "both" % locals())) + + # 3. 
We have both kernel and ramdisk + (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk( + self._session, vm) + + LOG.debug(_("Removing kernel/ramdisk files")) + + args = {'kernel-file': kernel, 'ramdisk-file': ramdisk} + task = self._session.async_call_plugin( + 'glance', 'remove_kernel_ramdisk', args) + self._session.wait_for_task(task, instance.id) + + LOG.debug(_("kernel/ramdisk files removed")) + def _destroy_vm(self, instance, vm): """Destroys a VM record """ + instance_id = instance.id try: task = self._session.call_xenapi('Async.VM.destroy', vm) - self._session.wait_for_task(instance.id, task) + self._session.wait_for_task(task, instance_id) except self.XenAPI.Failure, exc: LOG.exception(exc) + LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals()) + def destroy(self, instance): """ Destroy VM instance @@ -298,32 +534,37 @@ class VMOps(object): This is the method exposed by xenapi_conn.destroy(). The rest of the destroy_* methods are internal. """ + instance_id = instance.id + LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals()) vm = VMHelper.lookup(self._session, instance.name) return self._destroy(instance, vm, shutdown=True) - def _destroy(self, instance, vm, shutdown=True): + def _destroy(self, instance, vm, shutdown=True, + destroy_kernel_ramdisk=True): """ Destroys VM instance by performing: - 1. A shutdown if requested - 2. Destroying associated VDIs - 3. Destroying that actual VM record + 1. A shutdown if requested + 2. Destroying associated VDIs + 3. Destroying kernel and ramdisk files (if necessary) + 4. Destroying that actual VM record """ if vm is None: - # Don't complain, just return. This lets us clean up instances - # that have already disappeared from the underlying platform. + LOG.warning(_("VM is not present, skipping destroy...")) return if shutdown: self._shutdown(instance, vm) self._destroy_vdis(instance, vm) + if destroy_kernel_ramdisk: + self._destroy_kernel_ramdisk(instance, vm) self._destroy_vm(instance, vm) def _wait_with_callback(self, instance_id, task, callback): ret = None try: - ret = self._session.wait_for_task(instance_id, task) + ret = self._session.wait_for_task(task, instance_id) except self.XenAPI.Failure, exc: LOG.exception(exc) callback(ret) @@ -352,6 +593,78 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.resume', vm, False, True) self._wait_with_callback(instance.id, task, callback) + def rescue(self, instance, callback): + """Rescue the specified instance + - shutdown the instance VM + - set 'bootlock' to prevent the instance from starting in rescue + - spawn a rescue VM (the vm name-label will be instance-N-rescue) + + """ + rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue") + if rescue_vm: + raise RuntimeError(_( + "Instance is already in Rescue Mode: %s" % instance.name)) + + vm = self._get_vm_opaque_ref(instance) + self._shutdown(instance, vm) + self._acquire_bootlock(vm) + + instance._rescue = True + self.spawn(instance) + rescue_vm = self._get_vm_opaque_ref(instance) + + vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0] + vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"] + vbd_ref = VMHelper.create_vbd( + self._session, + rescue_vm, + vdi_ref, + 1, + False) + + self._session.call_xenapi("Async.VBD.plug", vbd_ref) + + def unrescue(self, instance, callback): + """Unrescue the specified instance + - unplug the instance VM's disk from the rescue VM + - teardown the rescue VM + - release the bootlock to allow the instance VM to start + + """ + rescue_vm = 
VMHelper.lookup(self._session, instance.name + "-rescue")
+
+        if not rescue_vm:
+            raise exception.NotFound(_(
+                "Instance is not in Rescue Mode: %s" % instance.name))
+
+        original_vm = self._get_vm_opaque_ref(instance)
+        vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)
+
+        instance._rescue = False
+
+        for vbd_ref in vbds:
+            vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
+            if vbd["userdevice"] == "1":
+                VMHelper.unplug_vbd(self._session, vbd_ref)
+                VMHelper.destroy_vbd(self._session, vbd_ref)
+
+        task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
+        self._session.wait_for_task(task1, instance.id)
+
+        vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
+        for vdi in vdis:
+            try:
+                task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+                self._session.wait_for_task(task, instance.id)
+            except self.XenAPI.Failure:
+                continue
+
+        task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
+        self._session.wait_for_task(task2, instance.id)
+
+        self._release_bootlock(original_vm)
+        self._start(instance, original_vm)
+
     def get_info(self, instance):
         """Return data about VM instance"""
         vm = self._get_vm_opaque_ref(instance)
@@ -374,6 +687,102 @@
         # TODO: implement this!
         return 'http://fakeajaxconsole/fake_url'

+    def inject_network_info(self, instance):
+        """
+        Generate the network info and make calls to place it into the
+        xenstore and the xenstore param list
+
+        """
+        # TODO(tr3buchet) - remove comment in multi-nic
+        # I've decided to go ahead and consider multiple IPs and networks
+        # at this stage even though they aren't implemented because these will
+        # be needed for multi-nic and there was no sense writing it for single
+        # network/single IP and then having to turn around and re-write it
+        vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
+        logging.debug(_("injecting network info to xenstore for vm: |%s|"),
+                      vm_opaque_ref)
+        admin_context = context.get_admin_context()
+        IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
+        networks = db.network_get_all_by_instance(admin_context,
+                                                  instance['id'])
+        for network in networks:
+            network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+
+            def ip_dict(ip):
+                return {
+                    "ip": ip.address,
+                    "netmask": network["netmask"],
+                    "enabled": "1"}
+
+            def ip6_dict(ip6):
+                return {
+                    "ip": ip6.addressV6,
+                    "netmask": ip6.netmaskV6,
+                    "gateway": ip6.gatewayV6,
+                    "enabled": "1"}
+
+            mac_id = instance.mac_address.replace(':', '')
+            location = 'vm-data/networking/%s' % mac_id
+            mapping = {
+                'label': network['label'],
+                'gateway': network['gateway'],
+                'mac': instance.mac_address,
+                'dns': [network['dns']],
+                'ips': [ip_dict(ip) for ip in network_IPs],
+                'ip6s': [ip6_dict(ip) for ip in network_IPs]}
+
+            self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
+
+            try:
+                self.write_to_xenstore(vm_opaque_ref, location, mapping)
+            except KeyError:
+                # catch KeyError for domid if instance isn't running
+                pass
+
+        return networks
+
+    def create_vifs(self, instance, networks=None):
+        """
+        Creates vifs for an instance
+
+        """
+        vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
+        logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
+        if networks is None:
+            admin_context = context.get_admin_context()
+            networks = db.network_get_all_by_instance(admin_context,
+                                                      instance['id'])
+        # TODO(tr3buchet) - remove comment in multi-nic
+        # this bit here about creating the vifs will be updated
+        # in multi-nic to handle multiple IPs on the same network
+        # and multiple networks
+        # for now it works
as there is only one of each + for network in networks: + bridge = network['bridge'] + network_ref = \ + NetworkHelper.find_network_with_bridge(self._session, bridge) + + if network_ref: + try: + device = "1" if instance._rescue else "0" + except AttributeError: + device = "0" + + VMHelper.create_vif( + self._session, + vm_opaque_ref, + network_ref, + instance.mac_address, + device) + + def reset_network(self, instance): + """ + Creates uuid arg to pass to make_agent_call and calls it. + + """ + args = {'id': str(uuid.uuid4())} + resp = self._make_agent_call('resetnetwork', instance, '', args) + def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records from 'path' downward. Returns a dict with the sub-paths as keys, @@ -434,7 +843,7 @@ class VMOps(object): args.update(addl_args) try: task = self._session.async_call_plugin(plugin, method, args) - ret = self._session.wait_for_task(instance_id, task) + ret = self._session.wait_for_task(task, instance_id) except self.XenAPI.Failure, e: ret = None err_trace = e.details[-1] @@ -443,6 +852,11 @@ class VMOps(object): if 'TIMEOUT:' in err_msg: LOG.error(_('TIMEOUT: The call to %(method)s timed out. ' 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) + elif 'NOT IMPLEMENTED:' in err_msg: + LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not' + ' supported by the agent. VM id=%(instance_id)s;' + ' args=%(strargs)s') % locals()) + raise NotImplementedError(err_msg) else: LOG.error(_('The call to %(method)s returned an error: %(e)s. ' 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index d89a6f995..757ecf5ad 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -83,7 +83,7 @@ class VolumeOps(object): try: task = self._session.call_xenapi('Async.VBD.plug', vbd_ref) - self._session.wait_for_task(vol_rec['deviceNumber'], task) + self._session.wait_for_task(task, vol_rec['deviceNumber']) except self.XenAPI.Failure, exc: LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a0b0499b8..b63a5f8c3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -100,6 +100,8 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts', 5, 'Max number of times to poll for VHD to coalesce.' ' Used only if connection_type=xenapi.') +flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount', + 'Base path to the storage repository') flags.DEFINE_string('target_host', None, 'iSCSI Target Host') @@ -156,10 +158,20 @@ class XenAPIConnection(object): """Create VM instance""" self._vmops.spawn(instance) + def finish_resize(self, instance, disk_info): + """Completes a resize, turning on the migrated instance""" + vdi_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'], + disk_info['cow']) + self._vmops._spawn_with_disk(instance, vdi_uuid) + def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ self._vmops.snapshot(instance, image_id) + def resize(self, instance, flavor): + """Resize a VM instance""" + raise NotImplementedError() + def reboot(self, instance): """Reboot VM instance""" self._vmops.reboot(instance) @@ -168,6 +180,12 @@ class XenAPIConnection(object): """Set the root/admin password on the VM instance""" self._vmops.set_admin_password(instance, new_pass) + def inject_file(self, instance, b64_path, b64_contents): + """Create a file on the VM instance. 
The file path and contents + should be base64-encoded. + """ + self._vmops.inject_file(instance, b64_path, b64_contents) + def destroy(self, instance): """Destroy VM instance""" self._vmops.destroy(instance) @@ -180,6 +198,11 @@ class XenAPIConnection(object): """Unpause paused VM instance""" self._vmops.unpause(instance, callback) + def migrate_disk_and_power_off(self, instance, dest): + """Transfers the VHD of a running instance to another host, then shuts + off the instance copies over the COW disk""" + return self._vmops.migrate_disk_and_power_off(instance, dest) + def suspend(self, instance, callback): """suspend the specified instance""" self._vmops.suspend(instance, callback) @@ -188,6 +211,22 @@ class XenAPIConnection(object): """resume the specified instance""" self._vmops.resume(instance, callback) + def rescue(self, instance, callback): + """Rescue the specified instance""" + self._vmops.rescue(instance, callback) + + def unrescue(self, instance, callback): + """Unrescue the specified instance""" + self._vmops.unrescue(instance, callback) + + def reset_network(self, instance): + """reset networking for specified instance""" + self._vmops.reset_network(instance) + + def inject_network_info(self, instance): + """inject network info for specified instance""" + self._vmops.inject_network_info(instance) + def get_info(self, instance_id): """Return data about VM instance""" return self._vmops.get_info(instance_id) @@ -204,6 +243,10 @@ class XenAPIConnection(object): """Return link to instance's ajax console""" return self._vmops.get_ajax_console(instance) + def get_host_ip_addr(self): + xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) + return xs_url.netloc + def attach_volume(self, instance_name, device_path, mountpoint): """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, @@ -263,7 +306,7 @@ class XenAPISession(object): self._session.xenapi.Async.host.call_plugin, self.get_xenapi_host(), plugin, fn, args) - def wait_for_task(self, id, task): + def wait_for_task(self, task, id=None): """Return the result of the given task. The task is polled until it completes. 
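+        The task handle comes first; `id` is optional and, when supplied,
+        is only used to record an instance action for the task.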
Not re-entrant.""" done = event.Event() @@ -290,10 +333,11 @@ class XenAPISession(object): try: name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) - action = dict( - instance_id=int(id), - action=name[0:255], # Ensure action is never > 255 - error=None) + if id: + action = dict( + instance_id=int(id), + action=name[0:255], # Ensure action is never > 255 + error=None) if status == "pending": return elif status == "success": @@ -307,7 +351,9 @@ class XenAPISession(object): LOG.warn(_("Task [%(name)s] %(task)s status:" " %(status)s %(error_info)s") % locals()) done.send_exception(self.XenAPI.Failure(error_info)) - db.instance_action_create(context.get_admin_context(), action) + + if id: + db.instance_action_create(context.get_admin_context(), action) except self.XenAPI.Failure, exc: LOG.warn(exc) done.send_exception(*sys.exc_info()) diff --git a/nova/volume/api.py b/nova/volume/api.py index 478c83486..2f4494845 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -49,7 +49,7 @@ class API(base.Base): options = { 'size': size, - 'user_id': context.user.id, + 'user_id': context.user_id, 'project_id': context.project_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", @@ -85,7 +85,7 @@ class API(base.Base): return self.db.volume_get(context, volume_id) def get_all(self, context): - if context.user.is_admin(): + if context.is_admin: return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index da7307733..45cc800e7 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -21,6 +21,7 @@ Drivers for volumes. """ import time +import os from nova import exception from nova import flags @@ -36,6 +37,8 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') flags.DEFINE_string('num_shell_tries', 3, 'number of times to attempt to run flakey shell commands') +flags.DEFINE_string('num_iscsi_scan_tries', 3, + 'number of times to rescan iSCSI target to find volume') flags.DEFINE_integer('num_shelves', 100, 'Number of vblade shelves') @@ -62,14 +65,14 @@ class VolumeDriver(object): self._execute = execute self._sync_exec = sync_exec - def _try_execute(self, command): + def _try_execute(self, *command): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. tries = 0 while True: try: - self._execute(command) + self._execute(*command) return True except exception.ProcessExecutionError: tries = tries + 1 @@ -81,34 +84,35 @@ class VolumeDriver(object): def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" - out, err = self._execute("sudo vgs --noheadings -o name") + out, err = self._execute('sudo', 'vgs', '--noheadings', '-o', 'name') volume_groups = out.split() if not FLAGS.volume_group in volume_groups: raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) def create_volume(self, volume): - """Creates a logical volume.""" + """Creates a logical volume. 
Can optionally return a Dictionary of + changes to the volume object to be persisted.""" if int(volume['size']) == 0: sizestr = '100M' else: sizestr = '%sG' % volume['size'] - self._try_execute("sudo lvcreate -L %s -n %s %s" % - (sizestr, + self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', volume['name'], - FLAGS.volume_group)) + FLAGS.volume_group) def delete_volume(self, volume): """Deletes a logical volume.""" try: - self._try_execute("sudo lvdisplay %s/%s" % + self._try_execute('sudo', 'lvdisplay', + '%s/%s' % (FLAGS.volume_group, volume['name'])) except Exception as e: # If the volume isn't present, then don't attempt to delete return True - self._try_execute("sudo lvremove -f %s/%s" % + self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % (FLAGS.volume_group, volume['name'])) @@ -123,7 +127,8 @@ class VolumeDriver(object): raise NotImplementedError() def create_export(self, context, volume): - """Exports the volume.""" + """Exports the volume. Can optionally return a Dictionary of changes + to the volume object to be persisted.""" raise NotImplementedError() def remove_export(self, context, volume): @@ -163,12 +168,13 @@ class AOEDriver(VolumeDriver): blade_id) = self.db.volume_allocate_shelf_and_blade(context, volume['id']) self._try_execute( - "sudo vblade-persist setup %s %s %s /dev/%s/%s" % - (shelf_id, + 'sudo', 'vblade-persist', 'setup', + shelf_id, blade_id, FLAGS.aoe_eth_dev, - FLAGS.volume_group, - volume['name'])) + "/dev/%s/%s" % + (FLAGS.volume_group, + volume['name'])) # NOTE(vish): The standard _try_execute does not work here # because these methods throw errors if other # volumes on this host are in the process of @@ -177,9 +183,9 @@ class AOEDriver(VolumeDriver): # just wait a bit for the current volume to # be ready and ignore any errors. time.sleep(2) - self._execute("sudo vblade-persist auto all", + self._execute('sudo', 'vblade-persist', 'auto', 'all', check_exit_code=False) - self._execute("sudo vblade-persist start all", + self._execute('sudo', 'vblade-persist', 'start', 'all', check_exit_code=False) def remove_export(self, context, volume): @@ -187,15 +193,15 @@ class AOEDriver(VolumeDriver): (shelf_id, blade_id) = self.db.volume_get_shelf_and_blade(context, volume['id']) - self._try_execute("sudo vblade-persist stop %s %s" % - (shelf_id, blade_id)) - self._try_execute("sudo vblade-persist destroy %s %s" % - (shelf_id, blade_id)) + self._try_execute('sudo', 'vblade-persist', 'stop', + shelf_id, blade_id) + self._try_execute('sudo', 'vblade-persist', 'destroy', + shelf_id, blade_id) def discover_volume(self, _volume): """Discover volume on a remote host.""" - self._execute("sudo aoe-discover") - self._execute("sudo aoe-stat", check_exit_code=False) + self._execute('sudo', 'aoe-discover') + self._execute('sudo', 'aoe-stat', check_exit_code=False) def undiscover_volume(self, _volume): """Undiscover volume on a remote host.""" @@ -222,7 +228,18 @@ class FakeAOEDriver(AOEDriver): class ISCSIDriver(VolumeDriver): - """Executes commands relating to ISCSI volumes.""" + """Executes commands relating to ISCSI volumes. + + We make use of model provider properties as follows: + + :provider_location: if present, contains the iSCSI target information + in the same format as an ietadm discovery + i.e. '<ip>:<port>,<portal> <target IQN>' + + :provider_auth: if present, contains a space-separated triple: + '<auth method> <auth username> <auth password>'. + `CHAP` is the only auth_method in use at the moment. 
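+
+    A made-up example of the two values (not real credentials):
+
+        provider_location: '10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-00000001'
+        provider_auth: 'CHAP my_username my_password'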
+ """ def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" @@ -236,13 +253,16 @@ class ISCSIDriver(VolumeDriver): iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - self._sync_exec("sudo ietadm --op new " - "--tid=%s --params Name=%s" % - (iscsi_target, iscsi_name), + self._sync_exec('sudo', 'ietadm', '--op', 'new', + "--tid=%s" % iscsi_target, + '--params', + "Name=%s" % iscsi_name, check_exit_code=False) - self._sync_exec("sudo ietadm --op new --tid=%s " - "--lun=0 --params Path=%s,Type=fileio" % - (iscsi_target, volume_path), + self._sync_exec('sudo', 'ietadm', '--op', 'new', + "--tid=%s" % iscsi_target, + '--lun=0', + '--params', + "Path=%s,Type=fileio" % volume_path, check_exit_code=False) def _ensure_iscsi_targets(self, context, host): @@ -263,12 +283,13 @@ class ISCSIDriver(VolumeDriver): volume['host']) iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - self._execute("sudo ietadm --op new " - "--tid=%s --params Name=%s" % + self._execute('sudo', 'ietadm', '--op', 'new', + '--tid=%s --params Name=%s' % (iscsi_target, iscsi_name)) - self._execute("sudo ietadm --op new --tid=%s " - "--lun=0 --params Path=%s,Type=fileio" % - (iscsi_target, volume_path)) + self._execute('sudo', 'ietadm', '--op', 'new', + '--tid=%s' % iscsi_target, + '--lun=0', '--params', + 'Path=%s,Type=fileio' % volume_path) def remove_export(self, context, volume): """Removes an export for a logical volume.""" @@ -283,51 +304,162 @@ class ISCSIDriver(VolumeDriver): try: # ietadm show will exit with an error # this export has already been removed - self._execute("sudo ietadm --op show --tid=%s " % iscsi_target) + self._execute('sudo', 'ietadm', '--op', 'show', + '--tid=%s' % iscsi_target) except Exception as e: LOG.info(_("Skipping remove_export. No iscsi_target " + "is presently exported for volume: %d"), volume['id']) return - self._execute("sudo ietadm --op delete --tid=%s " - "--lun=0" % iscsi_target) - self._execute("sudo ietadm --op delete --tid=%s" % - iscsi_target) + self._execute('sudo', 'ietadm', '--op', 'delete', + '--tid=%s' % iscsi_target, + '--lun=0') + self._execute('sudo', 'ietadm', '--op', 'delete', + '--tid=%s' % iscsi_target) + + def _do_iscsi_discovery(self, volume): + #TODO(justinsb): Deprecate discovery and use stored info + #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) + LOG.warn(_("ISCSI provider_location not stored, using discovery")) + + volume_name = volume['name'] - def _get_name_and_portal(self, volume_name, host): - """Gets iscsi name and portal from volume name and host.""" - (out, _err) = self._execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % host) + (out, _err) = self._execute('sudo', 'iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', volume['host']) for target in out.splitlines(): if FLAGS.iscsi_ip_prefix in target and volume_name in target: - (location, _sep, iscsi_name) = target.partition(" ") - break - iscsi_portal = location.split(",")[0] - return (iscsi_name, iscsi_portal) + return target + return None + + def _get_iscsi_properties(self, volume): + """Gets iscsi configuration + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. 
Discovery may be completely removed in the future.
+        The properties are:
+
+        :target_discovered:    boolean indicating whether discovery was used
+
+        :target_iqn:    the IQN of the iSCSI target
+
+        :target_portal:    the portal of the iSCSI target
+
+        :auth_method:, :auth_username:, :auth_password:
+
+            the authentication details. Right now, either auth_method is not
+            present meaning no authentication, or auth_method == `CHAP`
+            meaning use CHAP with the specified credentials.
+        """
+
+        properties = {}
+
+        location = volume['provider_location']
+
+        if location:
+            # provider_location is the same format as iSCSI discovery output
+            properties['target_discovered'] = False
+        else:
+            location = self._do_iscsi_discovery(volume)
+
+            if not location:
+                raise exception.Error(_("Could not find iSCSI export "
+                                        "for volume %s") %
+                                      (volume['name']))
+
+            LOG.debug(_("ISCSI Discovery: Found %s") % (location))
+            properties['target_discovered'] = True
+
+        (iscsi_target, _sep, iscsi_name) = location.partition(" ")
+
+        iscsi_portal = iscsi_target.split(",")[0]
+
+        properties['target_iqn'] = iscsi_name
+        properties['target_portal'] = iscsi_portal
+
+        auth = volume['provider_auth']
+
+        if auth:
+            (auth_method, auth_username, auth_secret) = auth.split()
+
+            properties['auth_method'] = auth_method
+            properties['auth_username'] = auth_username
+            properties['auth_password'] = auth_secret
+
+        return properties
+
+    def _run_iscsiadm(self, iscsi_properties, iscsi_command):
+        command = ("sudo iscsiadm -m node -T %s -p %s %s" %
+                   (iscsi_properties['target_iqn'],
+                    iscsi_properties['target_portal'],
+                    iscsi_command))
+        # _execute now takes the argv as separate arguments, so split the
+        # assembled command line before running it
+        (out, err) = self._execute(*command.split())
+        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+                  (iscsi_command, out, err))
+        return (out, err)
+
+    def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
+        iscsi_command = ("--op update -n %s -v %s" %
+                         (property_key, property_value))
+        return self._run_iscsiadm(iscsi_properties, iscsi_command)

     def discover_volume(self, volume):
         """Discover volume on a remote host."""
-        iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
-                                                             volume['host'])
-        self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
-                      (iscsi_name, iscsi_portal))
-        self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
-                      "-n node.startup -v automatic" %
-                      (iscsi_name, iscsi_portal))
-        return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal,
-                                                           iscsi_name)
+        iscsi_properties = self._get_iscsi_properties(volume)
+
+        if not iscsi_properties['target_discovered']:
+            self._run_iscsiadm(iscsi_properties, "--op new")
+
+        if iscsi_properties.get('auth_method'):
+            self._iscsiadm_update(iscsi_properties,
+                                  "node.session.auth.authmethod",
+                                  iscsi_properties['auth_method'])
+            self._iscsiadm_update(iscsi_properties,
+                                  "node.session.auth.username",
+                                  iscsi_properties['auth_username'])
+            self._iscsiadm_update(iscsi_properties,
+                                  "node.session.auth.password",
+                                  iscsi_properties['auth_password'])
+
+        self._run_iscsiadm(iscsi_properties, "--login")
+
+        self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
+
+        mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" %
+                        (iscsi_properties['target_portal'],
+                         iscsi_properties['target_iqn']))
+
+        # The /dev/disk/by-path/... node is not always present immediately
+        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
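+        # NOTE: each pass below rescans the target and then sleeps
+        # tries ** 2 seconds (1, 4, 9, ...), i.e. a quadratic backoff,
+        # before giving up after num_iscsi_scan_tries attempts.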
+        tries = 0
+        while not os.path.exists(mount_device):
+            if tries >= FLAGS.num_iscsi_scan_tries:
+                raise exception.Error(_("iSCSI device not found at %s") %
+                                      (mount_device))
+
+            LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
+                       "Will rescan & retry. Try number: %(tries)s") %
+                     locals())
+
+            # The rescan isn't documented as being necessary(?), but it helps
+            self._run_iscsiadm(iscsi_properties, "--rescan")
+
+            tries = tries + 1
+            if not os.path.exists(mount_device):
+                time.sleep(tries ** 2)
+
+        if tries != 0:
+            LOG.debug(_("Found iSCSI node %(mount_device)s "
+                        "(after %(tries)s rescans)") %
+                      locals())
+
+        return mount_device

     def undiscover_volume(self, volume):
         """Undiscover volume on a remote host."""
-        iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
-                                                             volume['host'])
-        self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
-                      "-n node.startup -v manual" %
-                      (iscsi_name, iscsi_portal))
-        self._execute("sudo iscsiadm -m node -T %s -p %s --logout " %
-                      (iscsi_name, iscsi_portal))
-        self._execute("sudo iscsiadm -m node --op delete "
-                      "--targetname %s" % iscsi_name)
+        iscsi_properties = self._get_iscsi_properties(volume)
+        self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
+        self._run_iscsiadm(iscsi_properties, "--logout")
+        self._run_iscsiadm(iscsi_properties, "--op delete")


 class FakeISCSIDriver(ISCSIDriver):
@@ -353,7 +485,7 @@ class RBDDriver(VolumeDriver):

     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
-        (stdout, stderr) = self._execute("rados lspools")
+        (stdout, stderr) = self._execute('rados', 'lspools')
         pools = stdout.split("\n")
         if not FLAGS.rbd_pool in pools:
             raise exception.Error(_("rbd has no pool %s") %
@@ -365,16 +497,13 @@
             size = 100
         else:
             size = int(volume['size']) * 1024
-        self._try_execute("rbd --pool %s --size %d create %s" %
-                          (FLAGS.rbd_pool,
-                           size,
-                           volume['name']))
+        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+                          '--size', size, 'create', volume['name'])

     def delete_volume(self, volume):
         """Deletes a logical volume."""
-        self._try_execute("rbd --pool %s rm %s" %
-                          (FLAGS.rbd_pool,
-                           volume['name']))
+        self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+                          'rm', volume['name'])

     def local_path(self, volume):
         """Returns the path of the rbd volume."""
@@ -409,7 +538,7 @@ class SheepdogDriver(VolumeDriver):

     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met"""
         try:
-            (out, err) = self._execute("collie cluster info")
+            (out, err) = self._execute('collie', 'cluster', 'info')
             if not out.startswith('running'):
                 raise exception.Error(_("Sheepdog is not working: %s") % out)
         except exception.ProcessExecutionError:
@@ -421,12 +550,13 @@
             sizestr = '100M'
         else:
             sizestr = '%sG' % volume['size']
-        self._try_execute("qemu-img create sheepdog:%s %s" %
-                          (volume['name'], sizestr))
+        self._try_execute('qemu-img', 'create',
+                          "sheepdog:%s" % volume['name'],
+                          sizestr)

     def delete_volume(self, volume):
         """Deletes a logical volume"""
-        self._try_execute("collie vdi delete %s" % volume['name'])
+        self._try_execute('collie', 'vdi', 'delete', volume['name'])

     def local_path(self, volume):
         return "sheepdog:%s" % volume['name']
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 6f8e25e19..3e8bc16b3 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -87,7 +87,7 @@ class VolumeManager(manager.Manager):
         if volume['status'] in ['available', 'in-use']:
self.driver.ensure_export(ctxt, volume) else: - LOG.info(_("volume %s: skipping export"), volume_ref['name']) + LOG.info(_("volume %s: skipping export"), volume['name']) def create_volume(self, context, volume_id): """Creates and exports the volume.""" @@ -107,14 +107,18 @@ class VolumeManager(manager.Manager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - self.driver.create_volume(volume_ref) + model_update = self.driver.create_volume(volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - self.driver.create_export(context, volume_ref) - except Exception as e: + model_update = self.driver.create_export(context, volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) + except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) - raise e + raise now = datetime.datetime.utcnow() self.db.volume_update(context, @@ -137,11 +141,11 @@ class VolumeManager(manager.Manager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) - except Exception as e: + except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error_deleting'}) - raise e + raise self.db.volume_destroy(context, volume_id) LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) diff --git a/nova/volume/san.py b/nova/volume/san.py new file mode 100644 index 000000000..9532c8116 --- /dev/null +++ b/nova/volume/san.py @@ -0,0 +1,585 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Drivers for san-stored volumes. + +The unique thing about a SAN is that we don't expect that we can run the volume +controller on the SAN hardware. We expect to access it over SSH or some API. 
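+
+Deployments point nova at the device through the san_* flags defined below;
+at a minimum san_ip plus either san_password or san_privatekey must be set.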
+""" + +import os +import paramiko + +from xml.etree import ElementTree + +from nova import exception +from nova import flags +from nova import log as logging +from nova.utils import ssh_execute +from nova.volume.driver import ISCSIDriver + +LOG = logging.getLogger("nova.volume.driver") +FLAGS = flags.FLAGS +flags.DEFINE_boolean('san_thin_provision', 'true', + 'Use thin provisioning for SAN volumes?') +flags.DEFINE_string('san_ip', '', + 'IP address of SAN controller') +flags.DEFINE_string('san_login', 'admin', + 'Username for SAN controller') +flags.DEFINE_string('san_password', '', + 'Password for SAN controller') +flags.DEFINE_string('san_privatekey', '', + 'Filename of private key to use for SSH authentication') +flags.DEFINE_string('san_clustername', '', + 'Cluster name to use for creating volumes') +flags.DEFINE_integer('san_ssh_port', 22, + 'SSH port to use with SAN') + + +class SanISCSIDriver(ISCSIDriver): + """ Base class for SAN-style storage volumes + + A SAN-style storage value is 'different' because the volume controller + probably won't run on it, so we need to access is over SSH or another + remote protocol. + """ + + def _build_iscsi_target_name(self, volume): + return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + + # discover_volume is still OK + # undiscover_volume is still OK + + def _connect_to_ssh(self): + ssh = paramiko.SSHClient() + #TODO(justinsb): We need a better SSH key policy + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if FLAGS.san_password: + ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, + username=FLAGS.san_login, + password=FLAGS.san_password) + elif FLAGS.san_privatekey: + privatekeyfile = os.path.expanduser(FLAGS.san_privatekey) + # It sucks that paramiko doesn't support DSA keys + privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) + ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, + username=FLAGS.san_login, + pkey=privatekey) + else: + raise exception.Error(_("Specify san_password or san_privatekey")) + return ssh + + def _run_ssh(self, command, check_exit_code=True): + #TODO(justinsb): SSH connection caching (?) + ssh = self._connect_to_ssh() + + #TODO(justinsb): Reintroduce the retry hack + ret = ssh_execute(ssh, command, check_exit_code=check_exit_code) + + ssh.close() + + return ret + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume.""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + if not (FLAGS.san_password or FLAGS.san_privatekey): + raise exception.Error(_("Specify san_password or san_privatekey")) + + if not (FLAGS.san_ip): + raise exception.Error(_("san_ip must be set")) + + +def _collect_lines(data): + """ Split lines from data into an array, trimming them """ + matches = [] + for line in data.splitlines(): + match = line.strip() + matches.append(match) + + return matches + + +def _get_prefixed_values(data, prefix): + """Collect lines which start with prefix; with trimming""" + matches = [] + for line in data.splitlines(): + line = line.strip() + if line.startswith(prefix): + match = line[len(prefix):] + match = match.strip() + matches.append(match) + + return matches + + +class SolarisISCSIDriver(SanISCSIDriver): + """Executes commands relating to Solaris-hosted ISCSI volumes. 
+
+    Basic setup for a Solaris iSCSI server:
+
+        pkg install storage-server SUNWiscsit
+
+        svcadm enable stmf
+
+        svcadm enable -r svc:/network/iscsi/target:default
+
+        pfexec itadm create-tpg e1000g0 ${MYIP}
+
+        pfexec itadm create-target -t e1000g0
+
+
+    Then grant the user that will be logging on lots of permissions.
+    I'm not sure exactly which though:
+
+        zfs allow justinsb create,mount,destroy rpool
+
+        usermod -P'File System Management' justinsb
+
+        usermod -P'Primary Administrator' justinsb
+
+    Also make sure you can login using san_login & san_password/san_privatekey
+    """
+
+    def _view_exists(self, luid):
+        cmd = "pfexec /usr/sbin/stmfadm list-view -l %s" % (luid)
+        (out, _err) = self._run_ssh(cmd,
+                                    check_exit_code=False)
+        if "no views found" in out:
+            return False
+
+        if "View Entry:" in out:
+            return True
+
+        raise exception.Error("Cannot parse list-view output: %s" % (out))
+
+    def _get_target_groups(self):
+        """Gets list of target groups from host."""
+        (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg")
+        matches = _get_prefixed_values(out, 'Target group: ')
+        LOG.debug("target_groups=%s" % matches)
+        return matches
+
+    def _target_group_exists(self, target_group_name):
+        return target_group_name in self._get_target_groups()
+
+    def _get_target_group_members(self, target_group_name):
+        (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg -v %s" %
+                                    (target_group_name))
+        matches = _get_prefixed_values(out, 'Member: ')
+        LOG.debug("members of %s=%s" % (target_group_name, matches))
+        return matches
+
+    def _is_target_group_member(self, target_group_name, iscsi_target_name):
+        return iscsi_target_name in (
+            self._get_target_group_members(target_group_name))
+
+    def _get_iscsi_targets(self):
+        cmd = ("pfexec /usr/sbin/itadm list-target | "
+               "awk '{print $1}' | grep -v ^TARGET")
+        (out, _err) = self._run_ssh(cmd)
+        matches = _collect_lines(out)
+        LOG.debug("_get_iscsi_targets=%s" % (matches))
+        return matches
+
+    def _iscsi_target_exists(self, iscsi_target_name):
+        return iscsi_target_name in self._get_iscsi_targets()
+
+    def _build_zfs_poolname(self, volume):
+        #TODO(justinsb): rpool should be configurable
+        zfs_poolname = 'rpool/%s' % (volume['name'])
+        return zfs_poolname
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        if int(volume['size']) == 0:
+            sizestr = '100M'
+        else:
+            sizestr = '%sG' % volume['size']
+
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        thin_provision_arg = '-s' if FLAGS.san_thin_provision else ''
+        # Create a zfs volume
+        self._run_ssh("pfexec /usr/sbin/zfs create %s -V %s %s" %
+                      (thin_provision_arg,
+                       sizestr,
+                       zfs_poolname))
+
+    def _get_luid(self, volume):
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        cmd = ("pfexec /usr/sbin/sbdadm list-lu | "
+               "grep -w %s | awk '{print $1}'" %
+               (zfs_poolname))
+
+        (stdout, _stderr) = self._run_ssh(cmd)
+
+        luid = stdout.strip()
+        return luid
+
+    def _is_lu_created(self, volume):
+        luid = self._get_luid(volume)
+        return luid
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        zfs_poolname = self._build_zfs_poolname(volume)
+        self._run_ssh("pfexec /usr/sbin/zfs destroy %s" %
+                      (zfs_poolname))
+
+    def local_path(self, volume):
+        # TODO(justinsb): Is this needed here?
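+        # device-mapper exposes a logical volume as /dev/mapper/<vg>-<lv>,
+        # escaping any '-' in the names as '--'; the replace() calls below
+        # mirror that convention.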
+ escaped_group = FLAGS.volume_group.replace('-', '--') + escaped_name = volume['name'].replace('-', '--') + return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + #TODO(justinsb): On bootup, this is called for every volume. + # It then runs ~5 SSH commands for each volume, + # most of which fetch the same info each time + # This makes initial start stupid-slow + self._do_export(volume, force_create=False) + + def create_export(self, context, volume): + self._do_export(volume, force_create=True) + + def _do_export(self, volume, force_create): + # Create a Logical Unit (LU) backed by the zfs volume + zfs_poolname = self._build_zfs_poolname(volume) + + if force_create or not self._is_lu_created(volume): + cmd = ("pfexec /usr/sbin/sbdadm create-lu /dev/zvol/rdsk/%s" % + (zfs_poolname)) + self._run_ssh(cmd) + + luid = self._get_luid(volume) + iscsi_name = self._build_iscsi_target_name(volume) + target_group_name = 'tg-%s' % volume['name'] + + # Create a iSCSI target, mapped to just this volume + if force_create or not self._target_group_exists(target_group_name): + self._run_ssh("pfexec /usr/sbin/stmfadm create-tg %s" % + (target_group_name)) + + # Yes, we add the initiatior before we create it! + # Otherwise, it complains that the target is already active + if force_create or not self._is_target_group_member(target_group_name, + iscsi_name): + self._run_ssh("pfexec /usr/sbin/stmfadm add-tg-member -g %s %s" % + (target_group_name, iscsi_name)) + if force_create or not self._iscsi_target_exists(iscsi_name): + self._run_ssh("pfexec /usr/sbin/itadm create-target -n %s" % + (iscsi_name)) + if force_create or not self._view_exists(luid): + self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" % + (target_group_name, luid)) + + #TODO(justinsb): Is this always 1? Does it matter? + iscsi_portal_interface = '1' + iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface + + db_update = {} + db_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_name)) + + return db_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + + # This is the reverse of _do_export + luid = self._get_luid(volume) + iscsi_name = self._build_iscsi_target_name(volume) + target_group_name = 'tg-%s' % volume['name'] + + if self._view_exists(luid): + self._run_ssh("pfexec /usr/sbin/stmfadm remove-view -l %s -a" % + (luid)) + + if self._iscsi_target_exists(iscsi_name): + self._run_ssh("pfexec /usr/sbin/stmfadm offline-target %s" % + (iscsi_name)) + self._run_ssh("pfexec /usr/sbin/itadm delete-target %s" % + (iscsi_name)) + + # We don't delete the tg-member; we delete the whole tg! + + if self._target_group_exists(target_group_name): + self._run_ssh("pfexec /usr/sbin/stmfadm delete-tg %s" % + (target_group_name)) + + if self._is_lu_created(volume): + self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" % + (luid)) + + +class HpSanISCSIDriver(SanISCSIDriver): + """Executes commands relating to HP/Lefthand SAN ISCSI volumes. + + We use the CLIQ interface, over SSH. 
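+    Each call is rendered as a single remote command line of the form
+    'verb key=value ...', for example (illustrative):
+
+        createVolume volumeName=vol-00000001 size=1GB output=XML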
+ + Rough overview of CLIQ commands used: + + :createVolume: (creates the volume) + + :getVolumeInfo: (to discover the IQN etc) + + :getClusterInfo: (to discover the iSCSI target IP address) + + :assignVolumeChap: (exports it with CHAP security) + + The 'trick' here is that the HP SAN enforces security by default, so + normally a volume mount would need both to configure the SAN in the volume + layer and do the mount on the compute layer. Multi-layer operations are + not catered for at the moment in the nova architecture, so instead we + share the volume using CHAP at volume creation time. Then the mount need + only use those CHAP credentials, so can take place exclusively in the + compute layer. + """ + + def _cliq_run(self, verb, cliq_args): + """Runs a CLIQ command over SSH, without doing any result parsing""" + cliq_arg_strings = [] + for k, v in cliq_args.items(): + cliq_arg_strings.append(" %s=%s" % (k, v)) + cmd = verb + ''.join(cliq_arg_strings) + + return self._run_ssh(cmd) + + def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True): + """Runs a CLIQ command over SSH, parsing and checking the output""" + cliq_args['output'] = 'XML' + (out, _err) = self._cliq_run(verb, cliq_args) + + LOG.debug(_("CLIQ command returned %s"), out) + + result_xml = ElementTree.fromstring(out) + if check_cliq_result: + response_node = result_xml.find("response") + if response_node is None: + msg = (_("Malformed response to CLIQ command " + "%(verb)s %(cliq_args)s. Result=%(out)s") % + locals()) + raise exception.Error(msg) + + result_code = response_node.attrib.get("result") + + if result_code != "0": + msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. " + " Result=%(out)s") % + locals()) + raise exception.Error(msg) + + return result_xml + + def _cliq_get_cluster_info(self, cluster_name): + """Queries for info about the cluster (including IP)""" + cliq_args = {} + cliq_args['clusterName'] = cluster_name + cliq_args['searchDepth'] = '1' + cliq_args['verbose'] = '0' + + result_xml = self._cliq_run_xml("getClusterInfo", cliq_args) + + return result_xml + + def _cliq_get_cluster_vip(self, cluster_name): + """Gets the IP on which a cluster shares iSCSI volumes""" + cluster_xml = self._cliq_get_cluster_info(cluster_name) + + vips = [] + for vip in cluster_xml.findall("response/cluster/vip"): + vips.append(vip.attrib.get('ipAddress')) + + if len(vips) == 1: + return vips[0] + + _xml = ElementTree.tostring(cluster_xml) + msg = (_("Unexpected number of virtual ips for cluster " + " %(cluster_name)s. Result=%(_xml)s") % + locals()) + raise exception.Error(msg) + + def _cliq_get_volume_info(self, volume_name): + """Gets the volume info, including IQN""" + cliq_args = {} + cliq_args['volumeName'] = volume_name + result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args) + + # Result looks like this: + #<gauche version="1.0"> + # <response description="Operation succeeded." 
name="CliqSuccess" + # processingTime="87" result="0"> + # <volume autogrowPages="4" availability="online" blockSize="1024" + # bytesWritten="0" checkSum="false" clusterName="Cluster01" + # created="2011-02-08T19:56:53Z" deleting="false" description="" + # groupName="Group01" initialQuota="536870912" isPrimary="true" + # iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b" + # maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca" + # minReplication="1" name="vol-b" parity="0" replication="2" + # reserveQuota="536870912" scratchQuota="4194304" + # serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316" + # size="1073741824" stridePages="32" thinProvision="true"> + # <status description="OK" value="2"/> + # <permission access="rw" + # authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7" + # chapName="chapusername" chapRequired="true" id="25369" + # initiatorSecret="" iqn="" iscsiEnabled="true" + # loadBalance="true" targetSecret="supersecret"/> + # </volume> + # </response> + #</gauche> + + # Flatten the nodes into a dictionary; use prefixes to avoid collisions + volume_attributes = {} + + volume_node = result_xml.find("response/volume") + for k, v in volume_node.attrib.items(): + volume_attributes["volume." + k] = v + + status_node = volume_node.find("status") + if not status_node is None: + for k, v in status_node.attrib.items(): + volume_attributes["status." + k] = v + + # We only consider the first permission node + permission_node = volume_node.find("permission") + if not permission_node is None: + for k, v in status_node.attrib.items(): + volume_attributes["permission." + k] = v + + LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") % + locals()) + return volume_attributes + + def create_volume(self, volume): + """Creates a volume.""" + cliq_args = {} + cliq_args['clusterName'] = FLAGS.san_clustername + #TODO(justinsb): Should we default to inheriting thinProvision? + cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0' + cliq_args['volumeName'] = volume['name'] + if int(volume['size']) == 0: + cliq_args['size'] = '100MB' + else: + cliq_args['size'] = '%sGB' % volume['size'] + + self._cliq_run_xml("createVolume", cliq_args) + + volume_info = self._cliq_get_volume_info(volume['name']) + cluster_name = volume_info['volume.clusterName'] + iscsi_iqn = volume_info['volume.iscsiIqn'] + + #TODO(justinsb): Is this always 1? Does it matter? + cluster_interface = '1' + + cluster_vip = self._cliq_get_cluster_vip(cluster_name) + iscsi_portal = cluster_vip + ":3260," + cluster_interface + + model_update = {} + model_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_iqn)) + + return model_update + + def delete_volume(self, volume): + """Deletes a volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['prompt'] = 'false' # Don't confirm + + self._cliq_run_xml("deleteVolume", cliq_args) + + def local_path(self, volume): + # TODO(justinsb): Is this needed here? 
+ raise exception.Error(_("local_path not supported")) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + return self._do_export(context, volume, force_create=False) + + def create_export(self, context, volume): + return self._do_export(context, volume, force_create=True) + + def _do_export(self, context, volume, force_create): + """Supports ensure_export and create_export""" + volume_info = self._cliq_get_volume_info(volume['name']) + + is_shared = 'permission.authGroup' in volume_info + + model_update = {} + + should_export = False + + if force_create or not is_shared: + should_export = True + # Check that we have a project_id + project_id = volume['project_id'] + if not project_id: + project_id = context.project_id + + if project_id: + #TODO(justinsb): Use a real per-project password here + chap_username = 'proj_' + project_id + # HP/Lefthand requires that the password be >= 12 characters + chap_password = 'project_secret_' + project_id + else: + msg = (_("Could not determine project for volume %s, " + "can't export") % + (volume['name'])) + if force_create: + raise exception.Error(msg) + else: + LOG.warn(msg) + should_export = False + + if should_export: + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['chapName'] = chap_username + cliq_args['targetSecret'] = chap_password + + self._cliq_run_xml("assignVolumeChap", cliq_args) + + model_update['provider_auth'] = ("CHAP %s %s" % + (chap_username, chap_password)) + + return model_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + + self._cliq_run_xml("unassignVolume", cliq_args) diff --git a/nova/wsgi.py b/nova/wsgi.py index e01cc1e1e..2d18da8fb 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -36,6 +36,7 @@ import webob.exc from paste import deploy +from nova import exception from nova import flags from nova import log as logging from nova import utils @@ -59,7 +60,6 @@ class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, threads=1000): - logging.basicConfig() self.pool = eventlet.GreenPool(threads) def start(self, application, port, host='0.0.0.0', backlog=128): @@ -83,6 +83,35 @@ class Server(object): log=WritableLogger(logger)) +class Request(webob.Request): + + def best_match_content_type(self): + """ + Determine the most acceptable content-type based on the + query extension then the Accept header + """ + + parts = self.path.rsplit(".", 1) + + if len(parts) > 1: + format = parts[1] + if format in ["json", "xml"]: + return "application/{0}".format(parts[1]) + + ctypes = ["application/json", "application/xml"] + bm = self.accept.best_match(ctypes) + + return bm or "application/json" + + def get_content_type(self): + try: + ct = self.headers["Content-Type"] + assert ct in ("application/xml", "application/json") + return ct + except Exception: + raise webob.exc.HTTPBadRequest("Invalid content type") + + class Application(object): """Base WSGI application wrapper. 
Subclasses need to implement __call__.""" @@ -114,7 +143,7 @@ class Application(object): def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: @@ -200,7 +229,7 @@ class Middleware(Application): """Do whatever you'd like to the response.""" return response - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): response = self.process_request(req) if response: @@ -213,7 +242,7 @@ class Debug(Middleware): """Helper class that can be inserted into any WSGI application chain to get information about the request and response.""" - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): print ("*" * 40) + " REQUEST ENVIRON" for key, value in req.environ.items(): @@ -277,7 +306,7 @@ class Router(object): self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """ Route the incoming request to a controller based on self.map. @@ -286,7 +315,7 @@ class Router(object): return self._router @staticmethod - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): """ Called by self._router after matching the incoming request to a route @@ -305,11 +334,11 @@ class Controller(object): WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument - which is the incoming webob.Request. They raise a webob.exc exception, + which is the incoming wsgi.Request. They raise a webob.exc exception, or return a dict which will be serialized by requested content type. """ - @webob.dec.wsgify + @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """ Call the method specified in req.environ by RoutesMiddleware. @@ -319,32 +348,45 @@ class Controller(object): method = getattr(self, action) del arg_dict['controller'] del arg_dict['action'] + if 'format' in arg_dict: + del arg_dict['format'] arg_dict['req'] = req result = method(**arg_dict) + if type(result) is dict: - return self._serialize(result, req) + content_type = req.best_match_content_type() + body = self._serialize(result, content_type) + + response = webob.Response() + response.headers["Content-Type"] = content_type + response.body = body + return response + else: return result - def _serialize(self, data, request): + def _serialize(self, data, content_type): """ - Serialize the given dict to the response type requested in request. + Serialize the given dict to the provided content_type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = Serializer(request.environ, _metadata) - return serializer.to_content_type(data) + serializer = Serializer(_metadata) + try: + return serializer.serialize(data, content_type) + except exception.InvalidContentType: + raise webob.exc.HTTPNotAcceptable() - def _deserialize(self, data, request): + def _deserialize(self, data, content_type): """ - Deserialize the request body to the response type requested in request. + Deserialize the request body to the specefied content type. 
Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), "_serialization_metadata", {}) - serializer = Serializer(request.environ, _metadata) - return serializer.deserialize(data) + serializer = Serializer(_metadata) + return serializer.deserialize(data, content_type) class Serializer(object): @@ -352,50 +394,52 @@ class Serializer(object): Serializes and deserializes dictionaries to certain MIME types. """ - def __init__(self, environ, metadata=None): + def __init__(self, metadata=None): """ Create a serializer based on the given WSGI environment. 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. """ self.metadata = metadata or {} - req = webob.Request.blank('', environ) - suffix = req.path_info.split('.')[-1].lower() - if suffix == 'json': - self.handler = self._to_json - elif suffix == 'xml': - self.handler = self._to_xml - elif 'application/json' in req.accept: - self.handler = self._to_json - elif 'application/xml' in req.accept: - self.handler = self._to_xml - else: - # This is the default - self.handler = self._to_json - def to_content_type(self, data): - """ - Serialize a dictionary into a string. + def _get_serialize_handler(self, content_type): + handlers = { + "application/json": self._to_json, + "application/xml": self._to_xml, + } + + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType() - The format of the string will be decided based on the Content Type - requested in self.environ: by Accept: header, or by URL suffix. + def serialize(self, data, content_type): """ - return self.handler(data) + Serialize a dictionary into a string of the specified content type. + """ + return self._get_serialize_handler(content_type)(data) - def deserialize(self, datastring): + def deserialize(self, datastring, content_type): """ Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. 
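+
+        For example (illustrative):
+
+            Serializer().deserialize('{"server": {"id": 1}}',
+                                     'application/json')
+            # => {'server': {'id': 1}}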
""" - datastring = datastring.strip() + return self.get_deserialize_handler(content_type)(datastring) + + def get_deserialize_handler(self, content_type): + handlers = { + "application/json": self._from_json, + "application/xml": self._from_xml, + } + try: - is_xml = (datastring[0] == '<') - if not is_xml: - return utils.loads(datastring) - return self._from_xml(datastring) - except: - return None + return handlers[content_type] + except Exception: + raise exception.InvalidContentType() + + def _from_json(self, datastring): + return utils.loads(datastring) def _from_xml(self, datastring): xmldata = self.metadata.get('application/xml', {}) @@ -515,10 +559,3 @@ def load_paste_app(filename, appname): except LookupError: pass return app - - -def paste_config_to_flags(config, mixins): - for k, v in mixins.iteritems(): - value = config.get(k, v) - converted_value = FLAGS[k].parser.Parse(value) - setattr(FLAGS, k, converted_value) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index d60816ce7..d2b2d61e6 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -30,13 +30,14 @@ import simplejson as json def main(dom_id, command, only_this_vif=None): - xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \ - % dom_id, True) + xsls = execute('/usr/bin/xenstore-ls', + '/local/domain/%s/vm-data/networking' % dom_id, True) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: - xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s" - xsread = execute(xsr % (dom_id, mac), True) + xsread = execute('/usr/bin/enstore-read', + '/local/domain/%s/vm-data/networking/%s' % + (dom_id, mac), True) data = json.loads(xsread) for ip in data['ips']: if data["label"] == "public": @@ -51,9 +52,9 @@ def main(dom_id, command, only_this_vif=None): apply_iptables_rules(command, params) -def execute(command, return_stdout=False): +def execute(*command, return_stdout=False): devnull = open(os.devnull, 'w') - proc = subprocess.Popen(command, shell=True, close_fds=True, + proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull) devnull.close() if return_stdout: @@ -67,45 +68,69 @@ def execute(command, return_stdout=False): def apply_iptables_rules(command, params): - iptables = lambda rule: execute("/sbin/iptables %s" % rule) + iptables = lambda *rule: execute('/sbin/iptables', *rule) - iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \ - -j ACCEPT" % params) + iptables('-D', 'FORWARD', '-m', 'physdev', + '--physdev-in', '%(VIF)s' % params, + '-s', '%(IP)s' % params, + '-j', 'ACCEPT') if command == 'online': - iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \ - -j ACCEPT" % params) + iptables('-A', 'FORWARD', '-m', 'physdev', + '--physdev-in', '%(VIF)s' % params, + '-s', '%(IP)s' % params, + '-j', 'ACCEPT') def apply_arptables_rules(command, params): - arptables = lambda rule: execute("/sbin/arptables %s" % rule) - - arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \ - --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params) - arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \ - --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params) + arptables = lambda *rule: execute('/sbin/arptables', *rule) + + arptables('-D', 'FORWARD', '--opcode', 'Request', + '--in-interface', '%(VIF)s' % params, 
+ '--source-ip', '%(IP)s' % params, + '--source-mac', '%(MAC)s' % params, + '-j', 'ACCEPT') + arptables('-D', 'FORWARD', '--opcode', 'Reply', + '--in-interface', '%(VIF)s' % params, + '--source-ip', '%(IP)s' % params, + '--source-mac', '%(MAC)s' % params, + '-j', 'ACCEPT') if command == 'online': - arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \ - --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params) - arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \ - --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params) + arptables('-A', 'FORWARD', '--opcode', 'Request', + '--in-interface', '%(VIF)s' % params, + '--source-ip', '%(IP)s' % params, + '--source-mac', '%(MAC)s' % params, + '-j', 'ACCEPT') + arptables('-A', 'FORWARD', '--opcode', 'Reply', + '--in-interface', '%(VIF)s' % params, + '--source-ip', '%(IP)s' % params, + '--source-mac', '%(MAC)s' % params, + '-j', 'ACCEPT') def apply_ebtables_rules(command, params): - ebtables = lambda rule: execute("/sbin/ebtables %s" % rule) - - ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" % - params) - ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" % - params) + ebtables = lambda *rule: execute("/sbin/ebtables", *rule) + + ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'], + '--arp-ip-dst', params['IP'], + '-j', 'ACCEPT') + ebtables('-D', 'FORWARD', '-p', '0800', '-o', + params['VIF'], '--ip-dst', params['IP'], + '-j', 'ACCEPT') if command == 'online': - ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \ - -j ACCEPT" % params) - ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \ - -j ACCEPT" % params) - - ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params) + ebtables('-A', 'FORWARD', '-p', '0806', + '-o', params['VIF'], + '--arp-ip-dst', params['IP'], + '-j', 'ACCEPT') + ebtables('-A', 'FORWARD', '-p', '0800', + '-o', params['VIF'], + '--ip-dst', params['IP'], + '-j', 'ACCEPT') + + ebtables('-D', 'FORWARD', '-s', '!', params['MAC'], + '-i', params['VIF'], '-j', 'DROP') if command == 'online': - ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params) + ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'], + '-i', params['VIF'], '-j', 'DROP') if __name__ == "__main__": diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent index 12c3a19c8..94eaabe73 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -73,8 +73,8 @@ def key_init(self, arg_dict): @jsonify def password(self, arg_dict): """Writes a request to xenstore that tells the agent to set - the root password for the given VM. The password should be - encrypted using the shared secret key that was returned by a + the root password for the given VM. The password should be + encrypted using the shared secret key that was returned by a previous call to key_init. The encrypted password value should be passed as the value for the 'enc_pass' key in arg_dict. """ @@ -91,6 +91,17 @@ def password(self, arg_dict): return resp +@jsonify +def resetnetwork(self, arg_dict): + """Writes a request to xenstore that tells the agent + to reset networking. + """ + arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''}) + request_id = arg_dict['id'] + arg_dict['path'] = "data/host/%s" % request_id + xenstore.write_record(self, arg_dict) + + def _wait_for_agent(self, request_id, arg_dict): """Periodically checks xenstore for a response from the agent.
The request is always written to 'data/host/{id}', and @@ -108,7 +119,8 @@ def _wait_for_agent(self, request_id, arg_dict): # First, delete the request record arg_dict["path"] = "data/host/%s" % request_id xenstore.delete_record(self, arg_dict) - raise TimeoutError("TIMEOUT: No response from agent within %s seconds." % + raise TimeoutError( + "TIMEOUT: No response from agent within %s seconds." % AGENT_TIMEOUT) ret = xenstore.read_record(self, arg_dict) # Note: the response for None with be a string that includes @@ -123,4 +135,5 @@ def _wait_for_agent(self, request_id, arg_dict): if __name__ == "__main__": XenAPIPlugin.dispatch( {"key_init": key_init, - "password": password}) + "password": password, + "resetnetwork": resetnetwork}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index aadacce57..201b99fda 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -21,17 +21,14 @@ # XenAPI plugin for managing glance images # -import base64 -import errno -import hmac import httplib import os import os.path import pickle -import sha +import shlex +import shutil import subprocess -import time -import urlparse +import tempfile import XenAPIPlugin @@ -41,120 +38,341 @@ configure_logging('glance') CHUNK_SIZE = 8192 KERNEL_DIR = '/boot/guest' -FILE_SR_PATH = '/var/run/sr-mount' -def copy_kernel_vdi(session,args): - vdi = exists(args, 'vdi-ref') - size = exists(args,'image-size') - #Use the uuid as a filename - vdi_uuid=session.xenapi.VDI.get_uuid(vdi) - copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)} - filename=with_vdi_in_dom0(session, vdi, False, - lambda dev: - _copy_kernel_vdi('/dev/%s' % dev,copy_args)) - return filename -def _copy_kernel_vdi(dest,copy_args): - vdi_uuid=copy_args['vdi_uuid'] - vdi_size=copy_args['vdi_size'] - logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",dest,vdi_uuid) - filename=KERNEL_DIR + '/' + vdi_uuid +def _copy_kernel_vdi(dest, copy_args): + vdi_uuid = copy_args['vdi_uuid'] + vdi_size = copy_args['vdi_size'] + logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s", + dest, vdi_uuid) + filename = KERNEL_DIR + '/' + vdi_uuid + #make sure KERNEL_DIR exists, otherwise create it + if not os.path.isdir(KERNEL_DIR): + logging.debug("Creating directory %s", KERNEL_DIR) + os.makedirs(KERNEL_DIR) #read data from /dev/ and write into a file on /boot/guest - of=open(filename,'wb') - f=open(dest,'rb') + of = open(filename, 'wb') + f = open(dest, 'rb') #copy only vdi_size bytes - data=f.read(vdi_size) + data = f.read(vdi_size) of.write(data) f.close() - of.close() - logging.debug("Done. Filename: %s",filename) - return filename + of.close() + logging.debug("Done. Filename: %s", filename) + return filename + + +def _download_tarball(sr_path, staging_path, image_id, glance_host, + glance_port): + """Download the tarball image from Glance and extract it into the staging + area. 
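    [Editor's note, not part of the original patch: the body below never
    buffers the image in dom0; the Glance response is read CHUNK_SIZE (8192)
    bytes at a time and piped straight into the stdin of a 'tar -zx'
    subprocess:

        GET /images/<image_id>  ->  resp.read(CHUNK_SIZE)  ->  tar_proc.stdin

    so peak memory use stays constant regardless of image size.]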
+ """ + conn = httplib.HTTPConnection(glance_host, glance_port) + conn.request('GET', '/images/%s' % image_id) + resp = conn.getresponse() + if resp.status == httplib.NOT_FOUND: + raise Exception("Image '%s' not found in Glance" % image_id) + elif resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % res.status) + + tar_cmd = "tar -zx --directory=%(staging_path)s" % locals() + tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True) + + chunk = resp.read(CHUNK_SIZE) + while chunk: + tar_proc.stdin.write(chunk) + chunk = resp.read(CHUNK_SIZE) + + _finish_subprocess(tar_proc, tar_cmd) + conn.close() + + +def _fixup_vhds(sr_path, staging_path, uuid_stack): + """Fixup the downloaded VHDs before we move them into the SR. + + We cannot extract VHDs directly into the SR since they don't yet have + UUIDs, aren't properly associated with each other, and would be subject to + a race-condition of one-file being present and the other not being + downloaded yet. + + To avoid these we problems, we use a staging area to fixup the VHDs before + moving them into the SR. The steps involved are: + + 1. Extracting tarball into staging area + + 2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd') + + 3. Linking the two VHDs together + + 4. Pseudo-atomically moving the images into the SR. (It's not really + atomic because it takes place as two os.rename operations; however, + the chances of an SR.scan occuring between the two rename() + invocations is so small that we can safely ignore it) + """ + def rename_with_uuid(orig_path): + """Rename VHD using UUID so that it will be recognized by SR on a + subsequent scan. + + Since Python2.4 doesn't have the `uuid` module, we pass a stack of + pre-computed UUIDs from the compute worker. + """ + orig_dirname = os.path.dirname(orig_path) + uuid = uuid_stack.pop() + new_path = os.path.join(orig_dirname, "%s.vhd" % uuid) + os.rename(orig_path, new_path) + return new_path, uuid + + def link_vhds(child_path, parent_path): + """Use vhd-util to associate the snapshot VHD with its base_copy. + + This needs to be done before we move both VHDs into the SR to prevent + the base_copy from being DOA (deleted-on-arrival). + """ + modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s" + % locals()) + modify_proc = _make_subprocess(modify_cmd, stderr=True) + _finish_subprocess(modify_proc, modify_cmd) + + def move_into_sr(orig_path): + """Move a file into the SR""" + filename = os.path.basename(orig_path) + new_path = os.path.join(sr_path, filename) + os.rename(orig_path, new_path) + return new_path + + def assert_vhd_not_hidden(path): + """ + This is a sanity check on the image; if a snap.vhd isn't + present, then the image.vhd better not be marked 'hidden' or it will + be deleted when moved into the SR. 
+ """ + query_cmd = "vhd-util query -n %(path)s -f" % locals() + query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True) + out, err = _finish_subprocess(query_proc, query_cmd) + + for line in out.splitlines(): + if line.startswith('hidden'): + value = line.split(':')[1].strip() + if value == "1": + raise Exception( + "VHD %(path)s is marked as hidden without child" % + locals()) + + orig_base_copy_path = os.path.join(staging_path, 'image.vhd') + if not os.path.exists(orig_base_copy_path): + raise Exception("Invalid image: image.vhd not present") + + base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path) + + vdi_uuid = base_copy_uuid + orig_snap_path = os.path.join(staging_path, 'snap.vhd') + if os.path.exists(orig_snap_path): + snap_path, snap_uuid = rename_with_uuid(orig_snap_path) + vdi_uuid = snap_uuid + # NOTE(sirp): this step is necessary so that an SR scan won't + # delete the base_copy out from under us (since it would be + # orphaned) + link_vhds(snap_path, base_copy_path) + move_into_sr(snap_path) + else: + assert_vhd_not_hidden(base_copy_path) + + move_into_sr(base_copy_path) + return vdi_uuid + + +def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): + """Hard-link VHDs into staging area with appropriate filename + ('snap' or 'image.vhd') + """ + for name, uuid in vdi_uuids.items(): + source = os.path.join(sr_path, "%s.vhd" % uuid) + link_name = os.path.join(staging_path, "%s.vhd" % name) + os.link(source, link_name) + + +def _upload_tarball(staging_path, image_id, glance_host, glance_port): + """ + Create a tarball of the image and then stream that into Glance + using chunked-transfer-encoded HTTP. + """ + conn = httplib.HTTPConnection(glance_host, glance_port) + # NOTE(sirp): httplib under python2.4 won't accept a file-like object + # to request + conn.putrequest('PUT', '/images/%s' % image_id) + + # NOTE(sirp): There is some confusion around OVF. Here's a summary of + # where we currently stand: + # 1. OVF as a container format is misnamed. We really should be using + # OVA since that is the name for the container format; OVF is the + # standard applied to the manifest file contained within. + # 2. We're currently uploading a vanilla tarball. In order to be OVF/OVA + # compliant, we'll need to embed a minimal OVF manifest as the first + # file. + headers = { + 'content-type': 'application/octet-stream', + 'transfer-encoding': 'chunked', + 'x-image-meta-is-public': 'True', + 'x-image-meta-status': 'queued', + 'x-image-meta-disk-format': 'vhd', + 'x-image-meta-container-format': 'ovf'} + for header, value in headers.iteritems(): + conn.putheader(header, value) + conn.endheaders() + + tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals() + tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True) -def put_vdis(session, args): + chunk = tar_proc.stdout.read(CHUNK_SIZE) + while chunk: + conn.send("%x\r\n%s\r\n" % (len(chunk), chunk)) + chunk = tar_proc.stdout.read(CHUNK_SIZE) + conn.send("0\r\n\r\n") + + _finish_subprocess(tar_proc, tar_cmd) + + resp = conn.getresponse() + if resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % resp.status) + conn.close() + + +def _make_staging_area(sr_path): + """ + The staging area is a place where we can temporarily store and + manipulate VHDs. The use of the staging area is different for upload and + download: + + Download + ======== + + When we download the tarball, the VHDs contained within will have names + like "snap.vhd" and "image.vhd". 
We need to assign UUIDs to them before + moving them into the SR. However, since 'image.vhd' may be a base_copy, we + need to link it to 'snap.vhd' (using vhd-util modify) before moving both + into the SR (otherwise the SR.scan will cause 'image.vhd' to be deleted). + The staging area gives us a place to perform these operations before they + are moved to the SR, scanned, and then registered with XenServer. + + Upload + ====== + + On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd' + in the case of the snapshot VHD, and 'image.vhd' in the case of the + base_copy. The staging area provides a directory in which we can create + hard-links to rename the VHDs without affecting what's in the SR. + + + NOTE + ==== + + The staging area is created as a subdirectory within the SR in order to + guarantee that it resides within the same filesystem and therefore permit + hard-linking and cheap file moves. + """ + staging_path = tempfile.mkdtemp(dir=sr_path) + return staging_path + + +def _cleanup_staging_area(staging_path): + """Remove staging area directory + + On upload, the staging area contains hard-links to the VHDs in the SR; + it's safe to remove the staging-area because the SR will keep the link + count > 0 (so the VHDs in the SR will not be deleted). + """ + shutil.rmtree(staging_path) + + +def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False): + """Make a subprocess according to the given command-line string + """ + kwargs = {} + kwargs['stdout'] = stdout and subprocess.PIPE or None + kwargs['stderr'] = stderr and subprocess.PIPE or None + kwargs['stdin'] = stdin and subprocess.PIPE or None + args = shlex.split(cmdline) + proc = subprocess.Popen(args, **kwargs) + return proc + + +def _finish_subprocess(proc, cmdline): + """Ensure that the process returned a zero exit code indicating success + """ + out, err = proc.communicate() + ret = proc.returncode + if ret != 0: + raise Exception("'%(cmdline)s' returned non-zero exit code: " + "retcode=%(ret)i, stderr='%(err)s'" % locals()) + return out, err + + +def download_vhd(session, args): + """Download an image from Glance, unbundle it, and then deposit the VHDs + into the storage repository + """ + params = pickle.loads(exists(args, 'params')) + image_id = params["image_id"] + glance_host = params["glance_host"] + glance_port = params["glance_port"] + uuid_stack = params["uuid_stack"] + sr_path = params["sr_path"] + + staging_path = _make_staging_area(sr_path) + try: + _download_tarball(sr_path, staging_path, image_id, glance_host, + glance_port) + vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) + return vdi_uuid + finally: + _cleanup_staging_area(staging_path) + + +def upload_vhd(session, args): + """Bundle the VHDs comprising an image and then stream them into Glance. 
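    [Editor's sketch, not part of the original patch: _upload_tarball above
    hand-rolls HTTP/1.1 chunked transfer encoding, since httplib on
    Python 2.4 cannot stream a file-like body. Each chunk of tar output is
    framed as

        conn.send("%x\r\n%s\r\n" % (len(chunk), chunk))

    i.e. hex length, CRLF, data, CRLF, and the stream is terminated with
    '0\r\n\r\n' as chunked encoding requires.]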
+ """ params = pickle.loads(exists(args, 'params')) vdi_uuids = params["vdi_uuids"] image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] - - sr_path = get_sr_path(session) - #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs - tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id)) - tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path] - paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ] - tar_cmd.extend(paths) - logging.debug("Bundling image with cmd: %s", tar_cmd) - subprocess.call(tar_cmd) - logging.debug("Writing to test file %s", tmp_file) - put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port) - return "" # FIXME(sirp): return anything useful here? - - -def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): - size = os.path.getsize(tmp_file) - basename = os.path.basename(tmp_file) - - bundle = open(tmp_file, 'r') + sr_path = params["sr_path"] + + staging_path = _make_staging_area(sr_path) try: - headers = { - 'x-image-meta-store': 'file', - 'x-image-meta-is_public': 'True', - 'x-image-meta-type': 'raw', - 'x-image-meta-size': size, - 'content-length': size, - 'content-type': 'application/octet-stream', - } - conn = httplib.HTTPConnection(glance_host, glance_port) - #NOTE(sirp): httplib under python2.4 won't accept a file-like object - # to request - conn.putrequest('PUT', '/images/%s' % image_id) - - for header, value in headers.iteritems(): - conn.putheader(header, value) - conn.endheaders() - - chunk = bundle.read(CHUNK_SIZE) - while chunk: - conn.send(chunk) - chunk = bundle.read(CHUNK_SIZE) - - - res = conn.getresponse() - #FIXME(sirp): should this be 201 Created? - if res.status != httplib.OK: - raise Exception("Unexpected response from Glance %i" % res.status) + _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) + _upload_tarball(staging_path, image_id, glance_host, glance_port) finally: - bundle.close() + _cleanup_staging_area(staging_path) -def get_sr_path(session): - sr_ref = find_sr(session) + return "" # Nothing useful to return on an upload - if sr_ref is None: - raise Exception('Cannot find SR to read VDI from') - sr_rec = session.xenapi.SR.get_record(sr_ref) - sr_uuid = sr_rec["uuid"] - sr_path = os.path.join(FILE_SR_PATH, sr_uuid) - return sr_path +def copy_kernel_vdi(session, args): + vdi = exists(args, 'vdi-ref') + size = exists(args, 'image-size') + #Use the uuid as a filename + vdi_uuid = session.xenapi.VDI.get_uuid(vdi) + copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)} + filename = with_vdi_in_dom0(session, vdi, False, + lambda dev: + _copy_kernel_vdi('/dev/%s' % dev, copy_args)) + return filename -#TODO(sirp): both objectstore and glance need this, should this be refactored -#into common lib -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None +def remove_kernel_ramdisk(session, args): + """Removes kernel and/or ramdisk from dom0's file system""" + kernel_file = exists(args, 'kernel-file') + ramdisk_file = exists(args, 'ramdisk-file') + if kernel_file: + os.remove(kernel_file) + if ramdisk_file: + os.remove(ramdisk_file) + return "ok" if __name__ == '__main__': - 
XenAPIPlugin.dispatch({'put_vdis': put_vdis, - 'copy_kernel_vdi': copy_kernel_vdi}) + XenAPIPlugin.dispatch({'upload_vhd': upload_vhd, + 'download_vhd': download_vhd, + 'copy_kernel_vdi': copy_kernel_vdi, + 'remove_kernel_ramdisk': remove_kernel_ramdisk}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration new file mode 100644 index 000000000..4aa89863a --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -0,0 +1,118 @@ +#!/usr/bin/env python + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +XenAPI Plugin for transferring data between host nodes +""" + +import os +import os.path +import pickle +import shlex +import shutil +import subprocess + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('migration') + + +def move_vhds_into_sr(session, args): + """Moves the VHDs from their copied location to the SR""" + params = pickle.loads(exists(args, 'params')) + instance_id = params['instance_id'] + + old_base_copy_uuid = params['old_base_copy_uuid'] + old_cow_uuid = params['old_cow_uuid'] + + new_base_copy_uuid = params['new_base_copy_uuid'] + new_cow_uuid = params['new_cow_uuid'] + + sr_path = params['sr_path'] + sr_temp_path = "%s/images/" % sr_path + + # Discover the copied VHDs locally, and then set up paths to copy + # them to under the SR + source_image_path = "%s/instance%d" % ('/images/', instance_id) + source_base_copy_path = "%s/%s.vhd" % (source_image_path, + old_base_copy_uuid) + source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid) + + temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id) + new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid) + new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid) + + logging.debug('Creating temporary SR path %s' % temp_vhd_path) + os.makedirs(temp_vhd_path) + + logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path)) + shutil.move(source_base_copy_path, new_base_copy_path) + + logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path)) + shutil.move(source_cow_path, new_cow_path) + + logging.debug('Cleaning up %s' % source_image_path) + os.rmdir(source_image_path) + + # Link the COW to the base copy + logging.debug('Attaching COW to the base copy %s -> %s' % + (new_cow_path, new_base_copy_path)) + subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' % + (new_cow_path, new_base_copy_path))) + logging.debug('Moving VHDs into SR %s' % sr_path) + shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path) + shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path) + + logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path) + os.rmdir(temp_vhd_path) + return "" + + +def transfer_vhd(session, args): + """Rsyncs a VHD to an adjacent host""" + params = pickle.loads(exists(args, 'params')) + instance_id = params['instance_id'] + host = params['host'] + vdi_uuid = params['vdi_uuid']
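    # Editor's note (not part of the original patch): the rsync destination
    # assembled below, '<host>:/images/instance<N>/', is the same directory
    # that move_vhds_into_sr() above reads from on the receiving host, so
    # transfer_vhd and move_vhds_into_sr form the push and ingest halves of
    # a single migration.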
+ sr_path = params['sr_path'] + vhd_path = "%s.vhd" % vdi_uuid + + source_path = "%s/%s" % (sr_path, vhd_path) + dest_path = '%s:%sinstance%d/' % (host, '/images/', instance_id) + + logging.debug("Preparing to transmit %s to %s" % (source_path, + dest_path)) + + ssh_cmd = 'ssh -o StrictHostKeyChecking=no' + + rsync_args = shlex.split("nohup /usr/bin/rsync -av --progress -e '%s' %s %s" + % (ssh_cmd, source_path, dest_path)) + + logging.debug('rsync %s' % ' '.join(rsync_args)) + + rsync_proc = subprocess.Popen(rsync_args, stdout=subprocess.PIPE) + logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0]) + logging.debug('Rsync return: %d' % rsync_proc.returncode) + if rsync_proc.returncode != 0: + raise Exception("Unexpected VHD transfer failure") + return "" + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd, + 'move_vhds_into_sr': move_vhds_into_sr, }) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore index 8ee2f748d..d0313b4ed 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore @@ -43,34 +43,37 @@ SECTOR_SIZE = 512 MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE -def is_vdi_pv(session,args): + +def is_vdi_pv(session, args): logging.debug("Checking wheter VDI has PV kernel") vdi = exists(args, 'vdi-ref') - pv=with_vdi_in_dom0(session, vdi, False, + pv = with_vdi_in_dom0(session, vdi, False, lambda dev: _is_vdi_pv('/dev/%s' % dev)) if pv: return 'true' else: return 'false' + def _is_vdi_pv(dest): - logging.debug("Running pygrub against %s",dest) - output=os.popen('pygrub -qn %s' % dest) - pv=False + logging.debug("Running pygrub against %s", dest) + output = os.popen('pygrub -qn %s' % dest) + pv = False for line in output.readlines(): #try to find kernel string - m=re.search('(?<=kernel:)/.*(?:>)',line) + m = re.search('(?<=kernel:)/.*(?:>)', line) if m: - if m.group(0).find('xen')!=-1: - pv=True - logging.debug("PV:%d",pv) - return pv - + if m.group(0).find('xen') != -1: + pv = True + logging.debug("PV:%d", pv) + return pv + + def get_vdi(session, args): src_url = exists(args, 'src_url') username = exists(args, 'username') password = exists(args, 'password') - raw_image=validate_bool(args, 'raw', 'false') + raw_image = validate_bool(args, 'raw', 'false') add_partition = validate_bool(args, 'add_partition', 'false') (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url) sr = find_sr(session) @@ -88,16 +91,17 @@ def get_vdi(session, args): vdi = create_vdi(session, sr, src_url, vdi_size, False) with_vdi_in_dom0(session, vdi, False, lambda dev: get_vdi_(proto, netloc, url_path, - username, password, add_partition,raw_image, + username, password, + add_partition, raw_image, virtual_size, '/dev/%s' % dev)) return session.xenapi.VDI.get_uuid(vdi) -def get_vdi_(proto, netloc, url_path, username, password, add_partition,raw_image, - virtual_size, dest): +def get_vdi_(proto, netloc, url_path, username, password, + add_partition, raw_image, virtual_size, dest): - #Salvatore: vdi should not be partitioned for raw images - if (add_partition and not raw_image): + #vdi should not be partitioned for raw images + if add_partition and not raw_image: write_partition(virtual_size, dest) offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0 @@ -144,7 +148,7 @@ def get_kernel(session, args): password = exists(args, 'password') (proto, netloc, url_path, _, _, _) =
urlparse.urlparse(src_url) - + dest = os.path.join(KERNEL_DIR, url_path[1:]) # Paranoid check against people using ../ to do rude things. @@ -154,8 +158,8 @@ def get_kernel(session, args): dirname = os.path.dirname(dest) try: os.makedirs(dirname) - except os.error, e: - if e.errno != errno.EEXIST: + except os.error, e: + if e.errno != errno.EEXIST: raise if not os.path.isdir(dirname): raise Exception('Cannot make directory %s', dirname) @@ -248,5 +252,5 @@ def download_all(response, length, dest_file, offset): if __name__ == '__main__': XenAPIPlugin.dispatch({'get_vdi': get_vdi, - 'get_kernel': get_kernel, + 'get_kernel': get_kernel, 'is_vdi_pv': is_vdi_pv}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py index 695bf3448..a35ccd6ab 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py @@ -36,7 +36,15 @@ pluginlib.configure_logging("xenstore") def jsonify(fnc): def wrapper(*args, **kwargs): - return json.dumps(fnc(*args, **kwargs)) + ret = fnc(*args, **kwargs) + try: + json.loads(ret) + except ValueError: + # Value should already be JSON-encoded, but some operations + # may write raw string values; this will catch those and + # properly encode them. + ret = json.dumps(ret) + return ret return wrapper diff --git a/po/ast.po b/po/ast.po new file mode 100644 index 000000000..6e224f235 --- /dev/null +++ b/po/ast.po @@ -0,0 +1,2130 @@ +# Asturian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-12 19:50+0000\n" +"Last-Translator: Xuacu Saturio <xuacusk8@gmail.com>\n" +"Language-Team: Asturian <ast@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nome del ficheru de l'autoridá de certificáu raíz" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nome del ficheru de clave privada" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Nome del ficheru de llista de refugu de certificáu raíz" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command."
+msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/po/cs.po b/po/cs.po new file mode 100644 index 000000000..861efa37e --- /dev/null +++ b/po/cs.po @@ -0,0 +1,2137 @@ +# Czech translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-02-07 12:45+0000\n" +"Last-Translator: David Pravec <Unknown>\n" +"Language-Team: Czech <cs@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-08 05:28+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Jméno souboru kořenové CA" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Jméno souboru s privátním klíčem" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Adresář, do kterého ukládáme naše klíče" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Adresář, do kterého ukládáme naši kořenovou CA" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Použijeme CA pro každý projekt?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "Při spouštění příkazu došlo k nečekané chybě" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Příkaz: %s\n" +"Vrácená hodnota: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Neošetřená výjimka" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "AMQP server na %s:%d není dosažitelný. Zkusím znovu za %d sekund." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Nepodařilo se připojit k AMQP serveru ani po %d pokusech. Tento proces bude " +"ukončen." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Znovu připojeno k AMQP frontě" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Selhalo získání zprávy z AMQP fronty" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "získáno: %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "Není metoda pro zpracování zprávy: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Není metoda pro zpracování zprávy: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Volajícímu je vrácena výjimka: %s" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "rozbalený obsah: %s" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Volání asynchronní funkce..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID je %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "odpověď %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "zpráva %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/locale/nova.pot b/po/da.po index a96411e33..f845f11b0 100644 --- a/locale/nova.pot +++ b/po/da.po @@ -1,21 +1,21 @@ -# Translations template for nova. -# Copyright (C) 2011 ORGANIZATION -# This file is distributed under the same license as the nova project. +# Danish translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. # FIRST AUTHOR <EMAIL@ADDRESS>, 2011. # -#, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2011.1\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" -"Language-Team: LANGUAGE <LL@li.org>\n" +"PO-Revision-Date: 2011-01-15 21:46+0000\n" +"Last-Translator: Soren Hansen <soren@linux2go.dk>\n" +"Language-Team: Danish <da@li.org>\n" "MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" +"Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.4\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 msgid "Filename of root CA" @@ -23,7 +23,7 @@ msgstr "" #: nova/crypto.py:49 msgid "Filename of private key" -msgstr "" +msgstr "Filnavn for privatnøgle" #: nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" @@ -283,8 +283,8 @@ msgstr "" #: nova/api/ec2/__init__.py:142 #, python-format msgid "" -"Access key %s has had %d failed authentications and will be locked out " -"for %d minutes." +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." msgstr "" #: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 @@ -805,7 +805,8 @@ msgstr "" #: nova/compute/api.py:94 #, python-format -msgid "Instance quota exceeded. You can only run %s more instances of this type." +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
msgstr "" #: nova/compute/api.py:109 @@ -957,7 +958,8 @@ msgstr "" #: nova/compute/manager.py:289 #, python-format -msgid "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" msgstr "" #: nova/compute/manager.py:301 @@ -1697,9 +1699,8 @@ msgstr "" #: nova/virt/xenapi_conn.py:113 msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" #: nova/virt/xenapi_conn.py:263 @@ -2126,5 +2127,4 @@ msgstr "" #: nova/volume/manager.py:129 #, python-format msgid "volume %s: deleted successfully" -msgstr "" - +msgstr "bind %s: slettet" diff --git a/po/de.po b/po/de.po new file mode 100644 index 000000000..3b30c2fa9 --- /dev/null +++ b/po/de.po @@ -0,0 +1,2137 @@ +# German translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-02-09 10:49+0000\n" +"Last-Translator: Christian Berendt <Unknown>\n" +"Language-Team: German <de@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-10 05:13+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Dateiname der Root CA" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Dateiname des Private Key" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Dateiname der Certificate Revocation List" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Soll eine eigenständige CA für jedes Projekt verwendet werden?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." 
+ +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Kommando: %s\n" +"Exit Code: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Nicht abgefangene Ausnahme" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) öffentlich (Schlüssel: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Beziehe von %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"Der AMQP server %s:%d ist nicht erreichbar. Erneuter Versuch in %d Sekunden." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "führe asynchronen Aufruf durch..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID ist %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "Betreff ist %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "Nachricht %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" +"Datastore %s ist nicht erreichbar. Versuche es erneut in %d Sekunden." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "Alle vorhandenen FLAGS:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "PID-Datei %s existiert nicht. 
Läuft der Daemon nicht?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "%s wird gestartet" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "Klasse %s konnte nicht gefunden werden" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Führe Kommando (subprocess) aus: %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "Ergebnis war %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "Volume %s: wird erstellt" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "Volume %s: erstelle LV mit %sG" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "Volume %s: erstelle Export" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "Volume %s: erfolgreich erstellt" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "Volume %s: entferne Export" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "Volume %s: wird entfernt" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "Volume %s: erfolgreich entfernt" diff --git a/po/es.po b/po/es.po new file mode 100644 index 000000000..8d4f90b26 --- /dev/null +++ b/po/es.po @@ -0,0 +1,2177 @@ +# Spanish translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-18 14:56+0000\n" +"Last-Translator: Javier Turégano <Unknown>\n" +"Language-Team: Spanish <es@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nombre de fichero de la CA raíz" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nombre de fichero de la clave privada" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Nombre de fichero de la lista de certificados de revocación raíz" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Donde guardamos nuestras claves" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Dónde guardamos nuestra CA raíz" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "¿Deberíamos usar una CA para cada proyecto?" 
+ +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado de usuarios, %s para el proyecto, " +"usuario, marca de tiempo" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado del proyecto, %s para el proyecto, " +"marca de tiempo" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado para vpns, %s para el proyecto, marca " +"de tiempo" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Sucedió un error inesperado mientras el comando se ejecutaba." + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Comando: %s\n" +"Código de salida: %s\n" +"Stdout: %s\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Excepción no controlada" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) públicar (clave: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "Publicando la ruta %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Declarando cola %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Declarando intercambio %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "Asociando %s a %s con clave %s" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Obteniendo desde %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"El servidor AMQP en %s:%d no se puede alcanzar. Se reintentará en %d " +"segundos." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Imposible conectar al servidor AMQP después de %d intentos. Apagando." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Reconectado a la cola" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Fallo al obtener el mensaje de la cola" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "recibido %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "no hay método para el mensaje: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "No hay método para el mensaje: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "contenido desempaquetado: %s" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Haciendo una llamada asíncrona..." 
+ +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID es %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "respuesta %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "mensaje %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Iniciando nodo %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "Se detuvo un servicio sin entrada en la base de datos" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "El objeto de servicio de la base de datos ha desaparecido, recreándolo." + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "Recuperada la conexión al servidor de modelos." + +#: nova/service.py:208 +msgid "model server went away" +msgstr "el servidor de modelos se ha ido" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" +"El almacén de datos %s es inalcanzable. Reintentándolo en %d segundos." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Sirviendo %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de opciones:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "el pidfile %s no existe. ¿No estará el demonio parado?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Comenzando %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Excepción interna: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "La clase %s no ha podido ser encontrada." + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "Obteniendo %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Ejecutando cmd (subproceso): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "El resultado fue %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "Depuración de la devolución de llamada: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "Ejecutando %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "No se pudo obtener la IP, usando 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "backend inválido: %s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "Demasiados intentos de autenticación fallidos." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" +"La clave de acceso %s ha tenido %d fallos de autenticación y se bloqueará " +"por %d minutos."
+ +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Fallo de autenticación: %s" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "Solicitud autenticada para %s:%s" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "acción: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "arg: %s\t\tval: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "Solicitud no autorizada para controller=%s y action=%s" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "No encontrado: %s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "Sucedió un ApiError: %s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "Sucedió un error inesperado: %s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Ha sucedido un error desconocido. Por favor, inténtelo de nuevo." + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Creando nuevo usuario: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Eliminando usuario: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Añadiendo rol %s al usuario %s para el proyecto %s" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "Añadiendo rol global %s al usuario %s" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Eliminando rol %s del usuario %s para el proyecto %s" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "Eliminando rol global %s del usuario %s" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "la operación debe ser añadir o eliminar" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "Obteniendo x509 para el usuario: %s en el proyecto %s" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Creación del proyecto %s gestionada por %s" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Borrar proyecto: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Añadiendo usuario %s al proyecto %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Eliminando usuario %s del proyecto %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "Solicitud de API no soportada: controller=%s,action=%s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "Generando CA raíz: %s" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Creando par de claves %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "Borrar par de claves %s" + +#: nova/api/ec2/cloud.py:357 +#, 
python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s no es un ipProtocol valido" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "Rango de puerto inválido" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revocar ingreso al grupo de seguridad %s" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "No hay regla para los parámetros especificados." + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Autorizar ingreso al grupo de seguridad %s" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Esta regla ya existe en el grupo %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "Crear Grupo de Seguridad %s" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "el grupo %s ya existe" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Borrar grupo de seguridad %s" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obtener salida de la consola para la instancia %s" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Crear volumen de %s GB" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "Asociar volumen %s a la instancia %s en %s" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "Desasociar volumen %s" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "Asignar dirección" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "Liberar dirección %s" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "Asociar dirección %s a la instancia %s" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "Desasociar dirección %s" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "Se va a iniciar la finalización de las instancias" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "Reiniciar instancia %r" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "Des-registrando la imagen %s" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "Registrada imagen %s con id %s" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "atributo no soportado: %s" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "id no valido: %s" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "usuario o grupo no especificado" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "sólo el grupo \"all\" está soportado" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "operation_type debe ser añadir o eliminar" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "Actualizando imagen %s públicamente" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Fallo al generar metadatos para la 
ip %s" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado error: %s" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "Incluyendo operaciones de administración in API." + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "El usuario %s ya existe" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "El proyecto no puede ser creado porque el administrador %s no existe" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "El proyecto no puede ser creado porque el proyecto %s ya existe" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" +"El proyecto no puede ser modificado porque el administrador %s no existe" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "No se ha encontrado el usuario \"%s\"" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "No se ha encontrado el proyecto \"%s\"" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Intento de instanciar sigleton" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "El objeto LDAP para %s no existe" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "El proyecto no puede ser creado porque el usuario %s no existe" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "El usuario %s ya es miembro de el grupo %s" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Se ha intentado eliminar el último miembro de un grupo. Eliminando el grupo " +"%s en su lugar." 
+ +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "El grupo con dn %s no existe" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "Buscando usuario: %r" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Fallo de autorización para la clave de acceso %s" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "No se ha encontrado usuario para la clave de acceso %s" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Utilizando nombre de proyecto = nombre de usuario (%s)" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" +"fallo de autorización: no existe proyecto con el nombre %s (usuario=%s)" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "No se ha podido encontrar un proyecto con nombre %s" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" +"Fallo de autorización: el usuario %s no es administrador y no es miembro del " +"proyecto %s" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "El usuario %s no es miembro del proyecto %s" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Firma invalida para el usuario %s" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "Las firmas no concuerdan" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "Debes especificar un proyecto" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "El rol %s no se ha podido encontrar" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "El rol %s es únicamente global" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Añadiendo rol %s al usuario %s en el proyecto %s" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Eliminando rol %s al usuario %s en el proyecto %s" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Proyecto %s creado con administrador %s" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "modificando proyecto %s" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Eliminar usuario %s del proyecto %s" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Eliminando proyecto %s" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Creado usuario %s (administrador: %r)" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "Eliminando usuario %s" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "Cambio de clave de acceso para el usuario %s" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Cambio de clave secreta para el usuario %s" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "El estado del 
administrador se ha fijado a %r para el usuario %s" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "No hay datos vpn para el proyecto %s" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "Red a insertar en la configuración de openvpn" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "Máscara de red a insertar en la configuración de openvpn" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "Lanzando VPN para %s" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "La instancia %d no se ha encontrado en get_network_topic" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "La instancia %d no tiene host" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "Cuota superada para %s, se intentó ejecutar %s instancias" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" +"Cuota de instancias superada. Sólo puedes ejecutar %s instancias más de este " +"tipo." + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "Creando una instancia raw" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "Se van a ejecutar %s instancias..." + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "Llamando al planificador para %s/%s, instancia %s" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "Se va a intentar terminar %s" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "La instancia %d no se ha encontrado durante la finalización" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "La instancia %d ya está siendo finalizada" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" +"El dispositivo especificado no es válido: %s. Ejemplo de dispositivo: " +"/dev/vdb" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "¡El volumen no está unido a nada!"
+ +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" +"El tamaño de la partición de entrada no es divisible de forma uniforme por " +"el tamaño del sector: %d / %d" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" +"Los bytes del almacenamiento local no son divisibles de forma uniforme por " +"el tamaño del sector: %d / %d" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "No se puede unir la imagen con el loopback: %s" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "Fallo al cargar la partición: %s" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Fallo al montar el sistema de ficheros: %s" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "Tipo de instancia desconocido: %s" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: ejecutando: |%s|" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: no ejecutando |%s|" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "La instancia ha sido creada previamente" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "instancia %s: iniciando..." 
+ +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "Instancia %s: no se pudo iniciar" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "Finalizando la instancia %s" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "Desasociando la dirección %s" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "Liberando la dirección %s" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "intentando finalizar una instancia que ya había sido finalizada: %s" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "Reiniciando instancia %s" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" +"intentando reiniciar una instancia que no está en ejecución: %s (estado: %s " +"esperado: %s)" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instancia %s: creando snapshot" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" +"intentando crear un snapshot de una instancia que no está en ejecución: %s " +"(estado: %s esperado: %s)" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "instancia %s: rescatando" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "instancia %s: pausando" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "instancia %s: continuando tras pausa" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instancia %s: obteniendo los diagnósticos" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "instancia %s: suspendiendo" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "instancia %s: continuando" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "instancia %s: bloqueando" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "instancia %s: desbloqueando" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instancia %s: obteniendo el estado de bloqueo" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "instancia %s: asociando volumen %s a %s" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "instancia %s: asociación fallida %s, eliminando" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "Desvinculando volumen %s del punto de montaje %s en la instancia %s" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Desvinculando volumen de instancia desconocida %s" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "actualizando %s..."
+ +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "error inesperado durante la actualización" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "No puedo obtener estadísticas del bloque para \"%s\" en \"%s\"" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "No puedo obtener estadísticas de la interfaz para \"%s\" en \"%s\"" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "excepción inesperada al obtener la conexión" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "Encontrada instancia: %s" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "El uso de un contexto de petición vacío está en desuso" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "No hay servicio para el id %s" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "No hay servicio para %s, %s" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "No hay ip flotante para la dirección %s" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "No hay instancia con id %s" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "La instancia %s no se ha encontrado" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "no hay par de claves para el usuario %s, nombre %s" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "No hay red para el id %s" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "No hay red para el puente %s" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "No hay red para la instancia %s" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "El token %s no existe" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "No hay cuota para el project_id %s" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "No hay volumen para el id %s" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "El volumen %s no se ha encontrado" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "No se ha encontrado dispositivo de exportación para el volumen %s" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "No se ha encontrado id de destino para el volumen %s" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "No hay un grupo de seguridad con el id %s" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "No hay un grupo de seguridad con nombre %s para el proyecto: %s" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "No hay una regla para el grupo de seguridad con el id %s" + +#: 
nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "No hay un usuario con el id %s" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "No hay un usuario para la clave de acceso %s" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "No hay proyecto con id %s" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "Parallax ha devuelto un error HTTP %d a la petición para /images" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" +"Parallax ha devuelto un error HTTP %d para la petición para /images/detail" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "La imagen %s no ha podido ser encontrada" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "Cuota excedida para %s, se intentó asignar una dirección" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" +"La cuota de direcciones ha sido excedida. No puedes asignar más direcciones" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Iniciando interfaz VLAN %s" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Iniciando interfaz puente para %s" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Excepción al recargar la configuración de dnsmasq: %s" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "El pid %d está obsoleto, relanzando dnsmasq" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "Al matar dnsmasq se lanzó %s" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "configurando el host de red" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "Asignando IP %s" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "IP %s asociada a una mac incorrecta %s vs %s" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "Tipo de valor S3 %r desconocido" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "Petición autenticada" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "Listado de cubos solicitado" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "Lista de claves para el cubo %s" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access 
bucket %s" +msgstr "Intento no autorizado para acceder al cubo %s" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "Creando el cubo %s" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "Eliminando el cubo %s" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "Intento no autorizado de eliminar el cubo %s" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "Obteniendo objeto: %s / %s" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "Intento no autorizado de obtener el objeto %s en el cubo %s" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "Colocando objeto: %s / %s" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "Intento no autorizado de subir el objeto %s al cubo %s" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "Eliminando objeto: %s / %s" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "No autorizado para subir imagen: directorio incorrecto %s" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "No autorizado para subir imagen: cubo %s no autorizado" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "Comenzando la subida de la imagen: %s" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "No autorizado para actualizar los atributos de la imagen %s" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "Cambiando los atributos de publicidad de la imagen %s %r" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "Actualizando los campos de usuario de la imagen %s" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "Intento no autorizado de borrar la imagen %s" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "Eliminada imagen: %s" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "No se han encontrado hosts" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "Debe de implementar un horario de reserva" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "Todos los hosts tienen demasiados cores" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "Todos los hosts tienen demasiados gigabytes" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "Todos los hosts tienen demasiadas redes" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." 
+msgstr "No puedo probar las imágenes sin un entorno real virtual" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "Hay que vigilar la instancia %s hasta que este en ejecución..." + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "Ejecutando instancias: %s" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "Después de terminar las instancias: %s" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "Recibido %s" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "Destino %s asignado" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "Fallo al abrir conexión con el hypervisor" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "La instancia %s no ha sido encontrada" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "En el host inicial" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "Intento de crear una vm duplicada %s" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "Comenzando VM %s " + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "VM %s iniciada " + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "Inicio de vm fallido: %s" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "Fallo al crear la VM %s" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "Creada VM %s..." + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "Se ha establecido la memoria para vm %s..." + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "Establecidas vcpus para vm %s..." 
+ +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" +"Creando disco para %s a través de la asignación del fichero de disco %s" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "Fallo al añadir unidad de disco a la VM %s" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "La nueva ruta para unidad de disco es %s" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "Fallo al añadir el fichero vhd a la VM %s" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "Discos creados para %s" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "Creando nic para %s " + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "Fallo al crear un puerto en el vswitch externo" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "Fallo creando puerto para %s" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "Creado puerto %s en el switch %s" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "Fallo al añadir nic a la VM %s" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "Creando nic para %s " + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "Trabajo WMI falló: %s" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "Trabajo WMI ha tenido exito: %s, Transcurrido=%s " + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "Recibida solicitud para destruir vm %s" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "Fallo al destruir vm %s" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "Del: disco %s vm %s" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" +"Obtenida información para vm %s: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "se ha encontrado un nombre duplicado: %s" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "Cambio de estado de la vm con éxito de %s a %s" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "Fallo al cambiar el estado de la vm de %s a %s" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "Finalizada la obtención de %s -- coloado en %s" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Conectando a libvirt: %s" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "Conexión a libvirt rota" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "instancia %s: eliminando los ficheros de la instancia %s" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "No hay disco en %s" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this 
time" +msgstr "" +"El snapshotting de instancias no está soportado en libvirt en este momento" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "instancia %s: reiniciada" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "_wait_for_reboot falló: %s" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "instancia %s: rescatada" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "_wait_for_rescue falló: %s" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "instancia %s: está ejecutándose" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "instancia %s: arrancada" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "insntancia %s: falló al arrancar" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "virsh dijo: %r" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "genial, es un dispositivo" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "datos: %r, fpath: %r" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "Contenidos del fichero %s: %r" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "instancia %s: Creando imagen" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "instancia %s: inyectando clave en la imagen %s" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "instancia %s: inyectando red en la imagen %s" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" +"instancia %s: ignorando el error al inyectar datos en la imagen %s (%s)" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "instancia %s: comenzando método toXML" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "instancia %s: finalizado método toXML" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" +"Debes especificar xenapi_connection_url, xenapi_connection_username " +"(opcional), y xenapi_connection_password para usar connection_type=xenapi" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "Tarea [%s] %s estado: éxito %s" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "Tarea [%s] %s estado: %s %s" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "Obtenida excepción %s" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "%s: _db_content => %s" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "Lanzando NotImplemented" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid 
"xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake no tiene una implementación para %s" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "Llamando %s %s" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "Llanado al adquiridor %s" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake no tiene una implementación para %s o ha sido llamada con un " +"número incorrecto de argumentos" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Encontrada una red no única para el puente %s" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "No se ha encontrado red para el puente %s" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "Creada VM %s cómo %s" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "Creando VBD para VM %s, VDI %s... " + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "Creado VBD %s for VM %s, VDI %s." + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD no encontrado en la instancia %s" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Imposible desconectar VBD %s" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Imposible destruir VBD %s" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "Creando VIF para VM %s, red %s." + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "Creado VIF %s para VM %s, red %s." + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "Creando snapshot de la VM %s con la etiqueta '%s'..." + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "Creando snapshot %s de la VM %s" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "Solicitando a xapi la subida de %s cómo %s'" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "Solicitando a xapi obtener %s cómo %s" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Buscando vid %s para el kernel PV" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "PV Kernel en VDI:%d" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s está todavía disponible" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "VHD %s tiene cómo padre a %s" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-escaneando SR %s" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" +"El padre %s no concuerda con el padre original %s, esperando la unión..." + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "No se han encontrado VDI's para VM %s" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "Número no esperado de VDIs (%s) encontrados para VM %s" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "Intentado la creación del nombre no único %s" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "Iniciando VM %s..." + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "Iniciando VM %s creado %s." + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "Instancia %s: iniciada" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "Instancia no existente %s" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "Comenzando snapshot para la VM %s" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "Incapaz de realizar snapshot %s: %s" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "Finalizado el snapshot y la subida de la VM %s" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "suspendido: instancia no encontrada: %s" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "reanudar: instancia no encontrada %s" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "instancia no encontrada %s" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "Introduciendo %s..." + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "Introducido %s cómo %s." 
+ +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "Imposible crear el repositorio de almacenamiento" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "Olvidando SR %s... " + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "Ignorando excepción %s al obtener PBDs de %s" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "Ignorando excepción %s al desconectar PBD %s" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." +msgstr "Olvidando SR %s completado." + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "Ignorando excepción %s al olvidar SR %s" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "Incapaz de insertar VDI en SR %s" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "Imposible obtener el registro del VDI %s en" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "Imposible insertar VDI para SR %s" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "Imposible obtener información del destino %s, %s" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "El punto de montaje no puede ser traducido: %s" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "Attach_volume: %s, %s, %s" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "Imposible crear VDI en SR %s para la instancia %s" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "Imposible utilizar SR %s para la instancia %s" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Imposible adjuntar volumen a la instancia %s" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "Punto de montaje %s unido a la instancia %s" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "Detach_volume: %s, %s" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Imposible encontrar volumen %s" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Imposible desasociar volumen %s" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "Punto de montaje %s desasociado de la instancia %s" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "Cuota excedida para %s, se intentó crear un volumen de %sG" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "Cuota de volumen superada. 
No puedes crear un volumen de tamaño %s" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "El estado del volumen debe estar disponible" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "El volumen ya está asociado previamente" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "El volumen ya ha sido desasociado previamente" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recuperándose de una ejecución fallida. Intento número %s" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "el grupo de volúmenes %s no existe" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "Falso AOE: %s" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "Falso ISCSI: %s" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Exportando de nuevo los volúmenes %s" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "volumen %s: creando" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "volumen %s: creando lv de tamaño %sG" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "volumen %s: creando exportación" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "volumen %s: creado satisfactoriamente" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "El volumen todavía está asociado" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "El volumen no es local a este nodo" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "volumen %s: eliminando exportación" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "volumen %s: eliminando" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volumen %s: eliminado satisfactoriamente" diff --git a/po/it.po b/po/it.po new file mode 100644 index 000000000..3f439f9dd --- /dev/null +++ b/po/it.po @@ -0,0 +1,2141 @@ +# Italian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-14 17:17+0000\n" +"Last-Translator: Armando Migliaccio <Unknown>\n" +"Language-Team: Italian <it@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nome del file root CA" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nome del file della chiave privata" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Dove si conservano le chiavi" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Dove si conserva root CA" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" 
+msgstr "Si dovrebbe usare un CA per ogni progetto?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Soggetto per il certificato degli utenti, %s per progetto, utente, orario" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Soggetto per il certificato dei progetti, %s per progetto, orario" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "Soggetto per il certificato delle vpn, %s per progetto, orario" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Percorso dei flags: %s" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" +"Si è verificato un errore inatteso durante l'esecuzione del comando." + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Comando: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Eccezione non gestita" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) pubblica (chiave: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "Pubblicando sulla route %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Dichiarando la coda %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Dichiarando il centralino %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "Collegando %s a %s con la chiave %s" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"Il server AMQP su %s:%d non è raggiungibile. Riprovare in %d secondi." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Impossibile connettersi al server AMQP dopo %d tentativi. Terminando " +"l'applicazione." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Riconnesso alla coda" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Impossibile prelevare il messaggio dalla coda" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "Inizializzando il Consumer Adapter per %s" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "ricevuto %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "nessun metodo per il messaggio: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Nessun metodo per il messaggio: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Sollevando eccezione %s al chiamante" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "contesto decompresso: %s" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Facendo chiamata asincrona..." 
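The files added in this commit are standard GNU gettext catalogs: each msgid is an English string marked for translation in the nova source, and entries flagged "#, python-format" must keep their %-placeholders so the translated string interpolates exactly like the original. As a minimal sketch (not part of the diff) of how such a catalog is consumed at runtime, assuming the .po files are compiled to .mo (for example with msgfmt) under an illustrative locale/<lang>/LC_MESSAGES/nova.mo layout -- the localedir path and "nova" domain name here are assumptions, not taken from this commit:

import gettext

# Load the Italian catalog; with fallback=True a missing catalog
# yields a NullTranslations object that returns the English msgids.
trans = gettext.translation('nova', localedir='locale',
                            languages=['it'], fallback=True)
_ = trans.gettext

# "#, python-format" entries preserve their placeholders, so the
# translated string is interpolated the same way as the original:
print(_("Starting %s node"))        # "Avviando il nodo %s"
print(_("Starting %s node") % 'compute')

Note that msgfmt omits entries whose msgstr is empty when it builds the .mo, so the many untranslated entries below simply fall back to the English msgid at lookup time.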
+ +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID è %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "risposta %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "argomento è %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "messaggio %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Avviando il nodo %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "Servizio terminato che non ha entry nel database" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "Il servizio è scomparso dal database, viene ricreato." + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "Connessione al model server ripristinata!" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "model server è scomparso" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "Datastore %s è irraggiungibile. Riprovare in %d secondi." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Servire %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "Insieme completo di FLAGS:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" +"Il pidfile %s non esiste. Assicurarsi che il demone sia in esecuzione.\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Avvio di %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Eccezione interna: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "Classe %s non può essere trovata" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "Prelievo %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Esecuzione del comando (sottoprocesso): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "Il risultato è %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/po/ja.po b/po/ja.po new file mode 100644 index 000000000..2cea24640 --- /dev/null +++ b/po/ja.po @@ -0,0 +1,2143 @@ +# Japanese translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-14 09:04+0000\n" +"Last-Translator: Koji Iida <Unknown>\n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "ルートCAのファイル名" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "プライベートキーのファイル名" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "ルート証明書失効リストのファイル名" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "キーを格納するパス" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "ルートCAを格納するパス" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "プロジェクトごとにCAを使用するか否かのフラグ" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "ユーザの証明書のサブジェクト、%s はプロジェクト、ユーザ、タイムスタンプ" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "プロジェクトの証明書のサブジェクト、%s はプロジェクト、およびタイムスタンプ" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "vpnの証明書のサブジェクト、%sはプロジェクト、およびタイムスタンプ" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Flags のパス: %s" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"コマンド: %s\n" +"終了コード: %s\n" +"標準出力: %r\n" +"標準エラー出力: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "キャッチされなかった例外" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) パブリッシュ (key: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "ルート %s へパブリッシュ" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "queue %s の宣言" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "exchange %s の宣言" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "%s を %s にキー %s でバインドします。" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "%s から %s を取得" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "AMQPサーバ %s:%d に接続できません。 %d 秒後に再度試みます。" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "AMQPサーバーに %d 回接続を試みましたが、接続できませんでした。シャットダウンします。" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "キューに再接続しました。" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "キューからメッセージの取得に失敗しました。" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "%sのアダプターコンシューマー(Adapter Consumer)を初期化しています。" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "受信: %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "呼び出し元に 例外 %s を返却します。" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "context %s をアンパックしました。" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "非同期呼び出しを実行します…" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_IDは %s です。" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "応答 %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "topic は %s です。" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "メッセージ %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "ノード %s を開始します。" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "データベースにエントリの存在しないサービスを終了します。" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "モデルサーバへの接続を復旧しました。" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "モデルサーバが消滅しました。" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "データストア %s に接続できません。 %d 秒後に再接続します。" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "%s サービスの開始" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "FLAGSの一覧:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "%s を開始します。" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "内側で発生した例外: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "クラス %s が見つかりません。" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "ファイルをフェッチ: %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "コマンド実行(subprocess): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "コマンド実行結果: %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "コールバック中のデバッグ: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "コマンド実行: %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "IPを取得できません。127.0.0.1 を %s として使います。" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "不正なバックエンドです: %s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "バックエンドは %s です。" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "認証失敗の回数が多すぎます。" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "アクセスキー %s は %d 回認証に失敗したため、%d 分間ロックされます。" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "%s の認証に失敗しました。" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "リクエストを認証しました: %s:%s" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "アクション(action): %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "引数(arg): %s\t値(val): %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "許可されていないリクエスト: controller=%s, action %sです。" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "NotFound 発生: %s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "APIエラー発生: %s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "予期しないエラー発生: %s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "未知のエラーが発生しました。再度リクエストを実行してください。" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Creating new user: 新しいユーザ %s を作成します。" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Deleting user: ユーザ %s を削除します。" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Adding role: ロール %s をユーザ %s、プロジェクト %s に追加します。" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "Adding sitewide role: サイトワイドのロール %s をユーザ %s に追加します。" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Removing role: ロール %s をユーザ %s プロジェクト %s から削除します。" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "Removing sitewide role: サイトワイドのロール %s をユーザ %s から削除します。" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "operation は add または remove の何れかである必要があります。" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "Getting X509: x509の取得: ユーザ %s, プロジェクト %s" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Create project: プロジェクト %s (%s により管理される)を作成します。" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Delete project: プロジェクト %s を削除しました。" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Adding user: ユーザ %s をプロジェクト %s に追加します。" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Removing user: ユーザ %s をプロジェクト %s から削除します。" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "サポートされていないAPIリクエストです。 controller = %s,action = %s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "ルートCA %s を生成しています。" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Create key pair: キーペア %s を作成します。" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "Delete key pair: キーペア %s を削除します。" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s は適切なipProtocolではありません。" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "ポートの範囲が不正です。" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revoke security group ingress: セキュリティグループ許可 %s の取消" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "指定されたパラメータに該当するルールがありません。" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Authorize security group ingress: セキュリティグループ許可 %s" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "指定されたルールは既にグループ %s に存在しています。" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "Create Security Group: セキュリティグループ %s を作成します。" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "グループ %s は既に存在しています。" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Delete security group: セキュリティグループ %s を削除します。" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "Attach volume: ボリューム%s をインスタンス %s にデバイス %s でアタッチします。" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "Detach volume: ボリューム %s をデタッチします" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "Allocate address: アドレスを割り当てます。" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "Release address: アドレス %s を開放します。" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "Associate address: アドレス %s をインスタンス %s に関連付けます。" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "Disassociate address: アドレス %s の関連付けを解除します。" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "インスタンス終了処理を開始します。" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "Reboot instance: インスタンス %r を再起動します。" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "De-registering image: イメージ %s を登録解除します。" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "Registered image: イメージ %s をid %s で登録します。" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "アトリビュート %s はサポートされていません。" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "id %s は不正です。" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "ユーザまたはグループが指定されていません。" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "グループ \"all\" のみサポートされています。" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "operation_type は add または remove の何れかである必要があります。" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "イメージ %s の公開設定を更新します。" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "エラー %s をキャッチしました。" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "管理用オペレーション(admin operation)をAPIに登録します。" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "例外: Compute.api::lock %s" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "例外: Compute.api::unlock %s" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "例外: Compute.api::get_lock %s" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "例外: Compute.api::pause %s" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "例外: Compute.api::unpause %s" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "例外: compute.api::suspend %s" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "例外: compute.api::resume %s" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "ユーザー %s は既に存在しています。" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "マネージャ %s が存在しないためプロジェクトを作成できません。" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "プロジェクト %s が既に存在するためプロジェクトを作成できません。" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "マネージャ %s が存在しないためプロジェクトを更新できません。" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "ユーザ \"%s\" が見つかりません。" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "プロジェクト \"%s\" が見つかりません。" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "シングルトンをインスタンス化しようとしました。" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "LDAPオブジェクト %s が存在しません。" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "ユーザ %s が存在しないためプロジェクトを作成できません。" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "ユーザ %s は既にグループ %s のメンバーです。" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "グループの最後のメンバーを削除しようとしました。代わりにグループ %s を削除してください。" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "dnが %s のグループは存在しません。" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "ユーザ %r を検索します。" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Failed authorization: アクセスキー %s の認証に失敗しました。" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "アクセスキー %s に対するユーザが見つかりませんでした。" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "ユーザ名 (%s) をプロジェクト名として使用します。" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "Failed authorization: 認証に失敗しました。プロジェクト名 %s (ユーザ = %s) は存在しません。" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "プロジェクト %s は見つかりませんでした。" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" +"Failed authorization: 認証に失敗しました: ユーザ %s は管理者ではなくかつプロジェクト %s のメンバーではありません。" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "ユーザ %s はプロジェクト %s のメンバーではありません。" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Invalid signature: ユーザ %s の署名が不正です。" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "署名が一致しません。" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "プロジェクトを指定してください。" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "ロール %s が見つかりません。" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "ロール %s はグローバルでのみ使用可能です。" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Adding role: ロール %s をユーザ %s (プロジェクト %s の) に追加します。" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Removing role: ロール %s をユーザ %s (プロジェクト %s の)から削除します。" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Created project: プロジェクト %s (マネージャ %s)を作成します。" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "modifying project: プロジェクト %s を更新します。" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Remove user: ユーザ %s をプロジェクト %s から削除します。" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Deleting project: プロジェクト %s を削除します。" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Created user: ユーザ %s (admin: %r) を作成しました。" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "Deleting user: ユーザ %s を削除します。" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "Access Key change: ユーザ %s のアクセスキーを更新します。" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "Admin status set: 管理者ステータス %r をユーザ %s に設定します。" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn 
data for project %s" +msgstr "プロジェクト %s に関するvpnデータがありません。" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "cloudpipeインスタンス起動時に実行するスクリプトのテンプレート" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "openvpnの設定に入れるネットワークの値" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "openvpnの設定に入れるネットマスクの値" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "%s 用のVPNを起動します。" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "get_network_topicにおいてインスタンス %d が見つかりませんでした。" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "インスタンス %d にホストが登録されていません。" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "%s のクオータ上限を超えました。%s インスタンスを実行しようとしました。" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "raw instanceを生成します。" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "%s 個のインスタンスの起動を始めます…" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "スケジューラに対して %s/%s のインスタンス %s を送信します。" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "%s を終了します。" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "インスタンス %d が終了処理において見つかりませんでした。" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "インスタンス %d は既に終了済みです。" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "デバイスの指定 %s が不正です: デバイス指定の例: /dev/vdb" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" 
+msgstr "ボリュームはどこにもアタッチされていません。" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "インプットパーティションサイズがセクターサイズで割り切れません。 %d / %d" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "ローカルストレージのバイト数がセクターサイズで割り切れません: %d / %d" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "イメージをループバック %s にアタッチできません。" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "パーティション %s のロードに失敗しました。" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "ファイルシステム %s のマウントに失敗しました。" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "%s は未知のインスタンスタイプです。" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executing: |%s|" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executing |%s|" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "インスタンスは既に生成されています。" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "インスタンス %s を開始します。" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "インスタンス %s の起動に失敗しました。" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "Terminating instance: インスタンス %s を終了します。" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "アドレス %s の関連付けを解除(disassociate)しています。" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "アドレス %s の割当を解除(deallocate)します。" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "既に消去済みのインスタンス%sを消去しようとしました。" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "Rebooting instance: インスタンス %s を再起動します。" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "実行していないインスタンスの再起動を試みます。%s (状態: %s 期待する状態: %s)" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "snapshotting: インスタンス %s のスナップショットを取得します。" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "実行していないインスタンスのスナップショット取得を試みます。%s (状態: %s 期待する状態: %s)" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "Rescuing: インスタンス %s をレスキューします。" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "Unrescuing: インスタンス %s をアンレスキューします。" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "pausing: インスタンス %s を一時停止します。" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "unpausing: インスタンス %s の一時停止を解除します。" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "retrieving diagnostics: インスタンス %s の診断情報を取得します。" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "suspending: インスタンス %s をサスペンドします。" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "resuming: インスタンス %s をレジュームします。" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "locking: インスタンス %s をロックします。" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "unlocking: インスタンス %s のロックを解除します。" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "getting locked state: インスタンス %s のロックを取得しました。" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "attaching volume: インスタンス %s についてボリューム %s を %s にアタッチします。" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "インスタンス %s: %sのアタッチに失敗しました。リムーブします。" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "Detach volume: ボリューム %s をマウントポイント %s (インスタンス%s)からデタッチします。" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "ボリュームを未知のインスタンス %s からデタッチします。" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "%s の情報の更新…" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "更新の最中に予期しないエラーが発生しました。" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "ブロックデバイス \"%s\" の統計を \"%s\" について取得できません。" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "インタフェース \"%s\" の統計を \"%s\" について取得できません。" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "接続に際し予期しないエラーが発生しました。" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "インスタンス %s が見つかりました。" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "Request context を空とすることは非推奨です。" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "id %s のserviceが存在しません。" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "%s, %s のserviceが存在しません。" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "アドレス %s の floating ip が存在しません。" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "id %s のinstanceが存在しません。" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "インスタンス %s が見つかりません。" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "ユーザ %s, ネーム%s に該当するキーペアが存在しません。" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "id %s に該当するnetwork が存在しません。" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "ブリッジ %s に該当する network が存在しません。" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "instance %s に該当する network が存在しません。" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "トークン %s が存在しません。" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "project_id %s に対するクオータが存在しません。" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "id %s に該当するボリュームが存在しません。" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "ボリューム %s が見つかりません。" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "ボリューム %s に関してエクスポートされているデバイスがありません。" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "ボリューム %s に対する target idが存在しません。" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "id %s のセキュリティグループが存在しません。" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "セキュリティグループ名 %s がプロジェクト %s に存在しません。" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "id %s のセキュリティグループルールが存在しません。" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "id %s のユーザが存在しません。" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "アクセスキー %s に該当するユーザが存在しません。" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" 
+msgstr "id %s のプロジェクトが存在しません。" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "Parallax がHTTPエラー%d を /images に対するリクエストに対して返しました。" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "Parallax がHTTPエラー %d を /images/detail に対するリクエストに対して返しました" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "イメージ %s が見つかりませんでした。" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "アドレスを割り当てようとしましたが、%s のクオータを超えました。" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "アドレスのクオータを超えました。これ以上アドレスを割り当てることはできません。" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "VLANインタフェース %s を開始します。" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "%s 用のブリッジインタフェースを開始します。" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "dnsmasqに対してhupを送信しましたが %s が発生しました。" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d は無効です。dnsmasqを再実行します。" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "dnsmasq をkillしましたが、 %s が発生しました。" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "ネットワークホストの設定をします。" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "IP %s をリースします。" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "IP %s がリースされましたが関連付けられていません。" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "IP %s が期待した mac %s ではなく %s にリースされました。" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "既に割当解除しているIP %s がリースされました。" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "割り当てていないIP %s が開放されました。" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "IP %s がmac %s ではない mac %s への割当から開放されました。" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "リースしていないIP %s が開放されました。" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "無効になった %s 個の fixed ip を割当解除しました。" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "未知のS3 value type %r です。" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "認証リクエスト" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "List of buckets が呼ばれました。" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "バケット %s のキーの一覧" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "Unauthorized attempt to access bucket: バケット %s に対するアクセスは許可されていません。" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "バケットを作成します。 %s" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "バケットを削除します。 %s" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete 
bucket %s" +msgstr "Unauthorized attempt to delete bucket: バケット %s に対する削除は許可されていません。" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "オブジェクトの取得: %s / %s" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" +"Unauthorized attempt to get object: オブジェクト %s のバケット %s からの取得は許可されていません。" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "オブジェクトの格納:: %s / %s" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" +"Unauthorized attempt to upload: オブジェクト %s のバケット %s へのアップロードは許可されていません。" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "オブジェクトを削除しています。: %s / %s" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" +"Not authorized to upload image: イメージの格納は許可されていません。ディレクトリ %s は正しくありません。" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" +"Not authorized to upload image: イメージの格納は許可されていません。バケット %s への格納は許可されていません。" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "イメージのアップロードを開始しました。 %s" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "Not authorized to update attributes: イメージ %s のアトリビュートの更新は許可されていません。" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "Toggling publicity flag: イメージ %s の公開フラグを %r に更新します。" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "Updating user fields: イメージ %s のユーザフィールドを更新します。" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "Unauthorized attempt to delete image: イメージ %s の削除は許可されていません。" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "イメージ %s を削除しました。" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "適切なホストが見つかりません。" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "メッセージのcast: %s %s for %s" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "全てのホストにコア数の空きがありません。" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "全てのホストが利用可能な容量(gigabytes)に達しています。" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "全てのホストがネットワークの最大数に達しています。" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "インスタンスのテストには実際の仮想環境が必要です。(fakeでは実行できません。)" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." 
+msgstr "インスタンス %s が実行するまで監視します…" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "インスタンス %s は実行中です。" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "インスタンス %s を終了した後です。" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "ネスとした受信: %s, %s" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "ネストした戻り値: %s" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "%s を受信。" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "ターゲット %s をアロケートしました。" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "ハイパーバイザへの接続に失敗しました。" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "インスタンス %s が見つかりません。" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "In init host" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "VM %s を二重に作成しようとしました。" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "VM %s を開始します。 " + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "VM %s を開始しました。 " + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "vmの生成(spawn)に失敗しました: %s" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "VM %s の作成に失敗しました。" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "VM %s を作成します。" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "vm %s のメモリを設定します。" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "vm %s のvcpus を設定します。" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "%s のディスクをディスクファイル %s をアタッチして作成します。" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "VM %s へのディスクドライブの追加に失敗しました。" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "新しいドライブパスは %s です。" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "vhdファイルの VM %s への追加に失敗しました。" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "%s に diskを作成します。" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "%s にNICを作成します。 " + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "外部vswitchへのポート作成に失敗しました。" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "ポート %s の作成に失敗しました。" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "スイッチポート %s をスイッチ %s に作成しました。" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "VM %s に対してNICの追加に失敗しました。" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "%s のNICを作成しました。 " + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "WMIジョブに失敗しました: %s" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "WMIジョブが成功しました: %s, 経過時間=%s " + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "destroy vm %s リクエストを受信しました。" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "vm %s の削除に失敗しました。" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "Del: 削除: disk %s vm %s" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" +"vm %s の情報の取得: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "%s は重複しています。" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "vmの状態の %s から %s への変更に成功しました。" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "VMの状態の %s から %s への変更に失敗しました。" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "%s を取得しました。格納先: %s" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "libvirt %s へ接続します。" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "libvirtへの接続が切れています。" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "インスタンス %s: インスタンスファイル %s を削除しています。" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "%s にディスクが存在しません。" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "インスタンスのスナップショットは現在libvirtに対してはサポートされていません。" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "インスタンス%s: 再起動しました。" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "_wait_for_reboot 失敗: %s" + +#: 
nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "インスタンス %s: rescued" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "_wait_for_rescue 失敗: %s" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "インスタンス %s を起動中です。" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "インスタンス %s: 起動しました。" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "インスタンス %s の起動に失敗しました。" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "virsh の出力: %r" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "デバイスです。" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "データ:%r ファイルパス: %r" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "ファイル %s の中身: %r" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "インスタンス %s のイメージを生成します。" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "インスタンス %s にキー %s をインジェクトします。" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "インスタンス %s のネットワーク設定をイメージ %s にインジェクトします。" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "インスタンス %s: データをイメージ %s にインジェクトする際にエラーが発生しました。(%s)" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "インスタンス %s: toXML メソッドを開始。" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "インスタンス %s: toXML メソッドを完了。" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" +"connection_type=xenapi を使用するには、以下の指定が必要です: xenapi_connection_url, " +"xenapi_connection_username (オプション), xenapi_connection_password" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "タスク [%s] %s ステータス: success %s" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "タスク [%s] %s ステータス: %s %s" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "例外 %s が発生しました。" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "%s: _db_content => %s" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "NotImplemented 例外を発生させます。" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake には %s が実装されていません。" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "呼び出し: %s %s" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "getter %s をコールします。" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "xenapi.fake に %s に関する実装がないか、引数の数が誤っています。" + 
+#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "ブリッジ %s に対してブリッジが複数存在します。" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "ブリッジ %s に対するネットワークが存在しません。" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "VM %s を %s として作成しました。" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "VM %s, VDI %s のVBDを作成します… " + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "VBD %s を VM %s, VDI %s に対して作成しました。" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "インスタンス %s のVBDが見つかりません。" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "VBD %s の unplug に失敗しました。" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "VBD %s の削除に失敗しました。" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "VM %s, ネットワーク %s を作成します。" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "VIF %s を VM %s, ネットワーク %s に作成しました。" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "VM %s のスナップショットをラベル '%s' で作成します。" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." +msgstr "スナップショット %s を VM %s について作成しました。" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "xapiに対して %s を '%s' としてアップロードするように指示します。" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "xapi に対して %s を %s として取得するように指示します。" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "PV kernelのvdi %s を取得します。" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "VDIのPV Kernel: %d" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s は依然として存在しています。" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver の vm state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi の power_state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "VHD %s のペアレントは %s です。" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "SR %s を再スキャンします。" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "ペアレント %s がオリジナルのペアレント %s と一致しません。合致するのを待ちます…" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "VM %s にVDIが存在しません。" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "予期しない数 (%s) のVDIがVM %s に存在します。" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "ユニークではないname %s を作成しようとしました。" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." 
+msgstr "VM %s を開始します…" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "VM %s の生成(spawning) により %s を作成しました。" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "インスタンス%s: ブートしました。" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "インスタンス%s が存在しません。" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "VM %s に対するスナップショットを開始します。" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "%s のスナップショットに失敗しました: %s" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "VM %s のスナップショットとアップロードが完了しました。" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "suspend: インスタンス %s は存在しません。" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "resume: インスタンス %s は存在しません。" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "インスタンス %s が見つかりません。" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "%s を introduce します…" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "%s を %s として introduce しました。" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "Storage Repository を作成できません。" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "VBD %s から SRを取得できません。" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "SR %s をforgetします。 " + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "例外 %s が %s のPBDを取得する際に発生しましたが無視します。" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "例外 %s が %s のPBDをunplugする際に発生しましたが無視します。" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "SR %s のforgetが完了。" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "例外 %s がSR %s をforgetする際に発生しましたが無視します。" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "SR %s のVDIのintroduceができません。" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "VDI %s のレコードを取得できません。" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "SR %s のVDIをintroduceできません。" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "ターゲットの情報を取得できません。 %s, %s" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "マウントポイントを変換できません。 %s" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "Attach_volume: ボリュームのアタッチ: %s, %s, %s" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "SR %s にインスタンス %s のVDIを作成できません。" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "SR %s をインスタンス %s に対して利用できません。" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "インスタンス %s にボリュームをアタッチできません。" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "マウントポイント %s をインスタンス %s にアタッチしました。" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "Detach_volume: ボリュームのデタッチ: %s, %s" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "ボリューム %s のデタッチができません。" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "マウントポイント %s をインスタンス %s からデタッチしました。" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "%sのクオータを超えています。サイズ %sG のボリュームの作成を行おうとしました。" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "ボリュームのクオータを超えています。%sの大きさのボリュームは作成できません。" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "ボリュームのステータス(status)が available でなければなりません。" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "ボリュームは既にアタッチされています(attached)。" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "ボリュームは既にデタッチされています(detached)。" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "実行失敗からリカバリーします。%s 回目のトライ。" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "偽のAOE: %s" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "偽のISCSI: %s" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "ボリューム%sを作成します。" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "ボリューム%sの%sGのlv (論理ボリューム) を作成します。" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "ボリューム %s をエクスポートします。" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "ボリューム %s の作成に成功しました。" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "ボリュームはアタッチされたままです。" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "ボリュームはこのノードのローカルではありません。" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "ボリューム %s のエクスポートを解除します。" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "ボリューム %s を削除します。" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "ボリューム %s の削除に成功しました。" diff --git a/po/nova.pot b/po/nova.pot new file mode 100644 index 000000000..ce88d731b --- /dev/null +++ b/po/nova.pot @@ -0,0 +1,2847 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" +"Language-Team: LANGUAGE <LL@li.org>\n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=CHARSET\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: ../nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: ../nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" + +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:91 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:95 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "" + +#: ../nova/compute/manager.py:180 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#. 
pylint: disable-msg=W0702 +#: ../nova/compute/manager.py:219 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:311 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: ../nova/compute/manager.py:316 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:332 +#, python-format +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:335 +#, python-format +msgid "instance %s: setting admin password" +msgstr "" + +#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" + +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" + +#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: ../nova/compute/manager.py:387 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: ../nova/compute/manager.py:406 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: ../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "" + +#: ../nova/compute/manager.py:553 +#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#. pylint: disable-msg=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: ../nova/compute/manager.py:585 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" + +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:406 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#. 
pylint: disable-msg=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#. pylint: disable-msg=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#. pylint: disable-msg=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: ../nova/utils.py:58 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: ../nova/utils.py:222 +#, python-format +msgid "Running %s" +msgstr "" + +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: ../nova/utils.py:374 +#, python-format +msgid "backend %s" +msgstr "" + +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:246 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" +msgstr "" + +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:397 +#, python-format +msgid "PV Kernel in VDI:%s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:442 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:463 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:465 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:542 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:567 +#, python-format +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:574 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." 
+msgstr "" + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No security group rule with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" 
+msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirt at this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" + +#. TODO(termie): cache? 
+#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retrieving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: ../nova/api/ec2/cloud.py:450 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:629 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 +#, python-format +msgid "Release address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:771 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:780 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "" + +#: ../nova/api/ec2/cloud.py:815 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: ../nova/api/ec2/cloud.py:867 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "" + +#: ../nova/api/ec2/cloud.py:908 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" +msgstr "" + +#: ../bin/nova-api.py:57 +#, python-format +msgid "No paste configuration for app: %s" +msgstr "" + +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" + +#: ../bin/nova-api.py:64 +#, python-format +msgid "Running %s API" +msgstr "" + +#: ../bin/nova-api.py:69 +#, python-format +msgid "No known API applications configured in %s." +msgstr "" + +#: ../bin/nova-api.py:83 +#, python-format +msgid "Starting nova-api node (version %s)" +msgstr "" + +#: ../bin/nova-api.py:89 +#, python-format +msgid "No paste configuration found for: %s" +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 +#, python-format +msgid "Argument %(key)s value %(value)s is too short." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 +#, python-format +msgid "Argument %(key)s value %(value)s contains invalid characters." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 +#, python-format +msgid "Argument %(key)s value %(value)s starts with a hyphen." 
+msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 +#, python-format +msgid "Argument %s is required." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:67 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:73 +#, python-format +msgid "instance %(name)s: not enough free memory" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:148 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:151 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:162 +#, python-format +msgid "Invalid value for onset_files: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:167 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:180 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:232 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:269 +#, python-format +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:280 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 +#, python-format +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; args=" +"%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM id=" +"%(instance_id)s; args=%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:569 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; args=" +"%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." 
+msgstr "" + +#: ../nova/image/s3.py:99 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:131 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." +msgstr "" + +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:207 +#, python-format +msgid "action: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:281 +#, python-format +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:314 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:317 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: ../nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:55 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: ../nova/virt/disk.py:91 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: ../nova/virt/disk.py:124 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" + +#: ../doc/ext/nova_todo.py:46 +#, python-format +msgid "%(filename)s, line %(line_info)d" +msgstr "" + +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: ../nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" + +#: ../nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: ../nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: ../nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:276 +#, python-format +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "" + +#: ../nova/virt/hyperv.py:286 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:288 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "" + +#: ../nova/virt/hyperv.py:361 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:386 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:393 +#, python-format +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" + +#: ../nova/virt/hyperv.py:415 +#, python-format +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, num_cpu=" +"%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" + +#: ../nova/virt/hyperv.py:451 +#, python-format +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/compute/api.py:71 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: ../nova/compute/api.py:77 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: ../nova/compute/api.py:97 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" + +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 +#, python-format +msgid "Going to run %s instances..." 
+msgstr "" + +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" + +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" +msgstr "" + +#: ../nova/compute/api.py:296 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: ../nova/compute/api.py:301 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: ../nova/compute/api.py:481 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 +#, python-format +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" + +#: ../nova/rpc.py:103 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "" + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:364 +#, python-format +msgid "response %s" +msgstr "" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. 
No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/virt/fake.py:239 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/network/manager.py:153 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: ../nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:273 +#, python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:276 +#, python-format +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:296 +#, python-format +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:299 +#, python-format +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:318 +#, python-format +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:396 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: ../nova/objectstore/handler.py:404 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:409 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: ../nova/objectstore/handler.py:423 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:431 +#, python-format +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" + +#. 
other attributes imply update +#: ../nova/objectstore/handler.py:436 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:450 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" + +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:423 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:448 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:515 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:592 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: ../nova/auth/manager.py:659 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:671 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s 
node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:472 +#, python-format +msgid "Group can't be created because group %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:478 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:495 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "User %s can't be removed from the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" +msgstr "" + +#: ../nova/auth/ldapdriver.py:542 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: ../nova/auth/ldapdriver.py:549 +#, python-format +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:564 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "" diff --git a/po/pt_BR.po b/po/pt_BR.po new file mode 100644 index 000000000..e57f7304a --- /dev/null +++ b/po/pt_BR.po @@ -0,0 +1,2148 @@ +# Brazilian Portuguese translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-02-03 20:32+0000\n" +"Last-Translator: André Gondim <andregondim@ubuntu.com>\n" +"Language-Team: Brazilian Portuguese <pt_BR@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nome do arquivo da CA raiz" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nome do arquivo da chave privada" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revocation List" +msgstr "Nome de arquivo da Lista de Revogação de Certificados" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Aonde armazenamos nossas chaves" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Aonde mantemos nosso CA raiz" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Devemos usar um CA para cada projeto?" 
+ +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Sujeito do certificado para usuários, %s para projeto, usuário, timestamp" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Sujeito do certificado para projetos, %s para projeto, timestamp" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "Sujeito do certificado para vpns, %s para projeto, timestamp" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Caminho da sinalização: %s" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Erro inesperado ao executar o comando." + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Comando: %s\n" +"Código de retorno: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Exceção não capturada" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) publicar (key: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "Publicando para rota %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Declarando fila %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Declarando troca %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "Atribuindo %s para %s com chave %s" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Obtendo de %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"Servidor AMQP em %s:%d inatingível. Tentando novamente em %d segundos." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Não foi possível conectar ao servidor AMQP após %d tentativas. Desligando." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Reconectado à fila" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Falha ao obter mensagem da fila" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "Iniciando o Adaptador Consumidor para %s" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "recebido %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "sem método para mensagem: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Sem método para mensagem: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "conteúdo descompactado: %s" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Fazendo chamada assíncrona..." 
+ +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "resposta %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "tópico é %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "mensagem %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Iniciando nó %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "Encerrado serviço que não tem entrada na base de dados" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "O objeto da base de dados do serviço desapareceu, Recriando." + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "Recuperada conexão servidor de modelo." + +#: nova/service.py:208 +msgid "model server went away" +msgstr "servidor de modelo perdido" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" +"Repositório de dados %s não pode ser atingido. Tentando novamente em %d " +"segundos." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Servindo %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de FLAGS:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" +"Arquivo de id de processo (pidfile) %s não existe. Daemon não está " +"executando?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Iniciando %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Exceção interna: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "Classe %s não pode ser encontrada" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "Obtendo %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Executando comando (subprocesso): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "Resultado foi %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "debug em callback: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "Executando %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "Não foi possível obter IP, usando 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend inválido: %s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "Muitas falhas de autenticação." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" +"Chave de acesso %s tem %d falhas de autenticação e vai ser bloqueada por %d " +"minutos." 
+ +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Falha de Autenticação: %s" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "Requisição Autenticada Para %s:%s)" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "ação: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "argumento: %s\t\tvalor: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "Requisição não autorizada para controlador=%s e ação=%s" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "NotFound lançado: %s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "ApiError lançado: %s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "Erro inesperado lançado: %s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Ocorreu um erro desconhecido. Por favor tente sua requisição novamente." + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Criando novo usuário: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Excluindo usuário: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Adicionando papel %s ao usuário %s para o projeto %s" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "Adicionando papel de todo o site %s ao usuário %s" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Removendo papel %s do usuário %s para o projeto %s" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "Removendo papel de todo o site %s do usuário %s" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "operação deve ser adicionar ou remover" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "Obtendo x509 para usuário: %s no projeto: %s" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Criar projeto %s gerenciado por %s" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Excluir projeto: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Adicionando usuário %s ao projeto %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Excluindo usuário %s do projeto %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "Requisição de API não suportada: controlador = %s,ação = %s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "Gerando CA raiz: %s" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Criar par de chaves %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "Remover par de chaves %s" + +#: nova/api/ec2/cloud.py:357 +#,
python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s não é um ipProtocol válido" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "Intervalo de porta inválido" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revogar entrada do grupo de segurança %s" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "Não existe regra para os parâmetros especificados." + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Autorizar entrada do grupo de segurança %s" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Esta regra já existe no grupo %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "Criar Grupo de Segurança %s" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "grupo %s já existe" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Excluir grupo de segurança %s" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obter saída do console para instância %s" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Criar volume de %s GB" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "Anexar volume %s à instância %s em %s" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "Desanexar volume %s" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "Alocar endereço" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "Liberar endereço %s" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "Associar endereço %s à instância %s" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "Desassociar endereço %s" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "Começando a terminar instâncias" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "Reiniciar instância %r" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "Removendo o registro da imagem %s" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "Registrada imagem %s com id %s" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "atributo não suportado: %s" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "id inválido: %s" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "usuário ou grupo não especificado" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "apenas o grupo \"all\" é suportado" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "operation_type deve ser add ou remove" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "Atualizando visibilidade pública da imagem %s" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#:
nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado o erro: %s" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "Incluindo operações administrativas na API." + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "Usuário %s já existe" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "Projeto não pode ser criado porque o gerente %s não existe." + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "Projeto não pode ser criado porque o projeto %s já existe." + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "Projeto não pode ser modificado porque o gerente %s não existe." + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Usuário \"%s\" não encontrado" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Projeto \"%s\" não encontrado" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Tentativa de instanciar singleton" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "Objeto LDAP para %s não existe" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "Projeto não pode ser criado porque o usuário %s não existe" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "Usuário %s já pertence ao grupo %s" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Tentativa de remover o último membro de um grupo. Em vez disso, excluindo o " +"grupo em %s."
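From this point on many entries carry an empty msgstr "". In PO terms that means "not translated yet", not "translate to the empty string": msgfmt leaves such entries out of the compiled .mo, so the runtime lookup misses and gettext falls back to returning the English msgid. A short sketch of that fallback, under the same hypothetical "locale/" layout as above:

    import gettext

    # fallback=True returns a NullTranslations when no .mo is found, which
    # behaves the same way a lookup miss does: the msgid comes back unchanged.
    t = gettext.translation("nova", "locale/", languages=["pt_BR"], fallback=True)
    _ = t.gettext

    print(_("Uncaught exception"))         # translated entry: "Exceção não capturada"
    print(_("Caught error: %s") % "boom")  # empty msgstr: English msgid is returned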
+ +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "Grupo no dn %s não existe" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "Procurando usuário: %r" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Falha de autorização para chave de acesso %s" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "Nenhum usuário encontrado para chave de acesso %s" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Usando nome do projeto = nome do usuário (%s)" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "falha de autorização: nenhum projeto de nome %s (usuário=%s)" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "Nenhum projeto chamado %s pode ser encontrado." + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" +"Falha de autorização: usuário %s não é administrador nem membro do projeto %s" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "Usuário %s não é membro do projeto %s" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Assinatura inválida para usuário %s" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "Assinatura não confere" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "Deve especificar projeto" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "O papel %s não foi encontrado" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "O papel %s é apenas global" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Adicionando papel %s ao usuário %s no projeto %s" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Removendo papel %s do usuário %s no projeto %s" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Criado projeto %s com gerente %s" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "modificando projeto %s" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Remover usuário %s do projeto %s" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Excluindo projeto %s" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Criado usuário %s (administrador: %r)" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "Apagando usuário %s" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" 
+msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "Executando VPN para %s" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/po/ru.po b/po/ru.po new file mode 100644 index 000000000..5d031ac08 --- /dev/null +++ b/po/ru.po @@ -0,0 +1,2138 @@ +# Russian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-31 06:53+0000\n" +"Last-Translator: Andrey Olykainen <Unknown>\n" +"Language-Team: Russian <ru@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Имя файла секретного ключа" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Путь к ключам" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Неожиданная ошибка при выполнении команды." 
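The preamble of po/ru.po above shows the PO header convention: file metadata is an ordinary catalog entry whose msgid is the empty string, with Project-Id-Version, the charset, and the Launchpad export stamps carried as RFC 822-style lines in its msgstr. Once the file is compiled (with GNU gettext's msgfmt, e.g. msgfmt -o nova.mo ru.po), Python exposes that header as a dict; a sketch, again assuming a hypothetical "locale/" layout:

    import gettext

    t = gettext.translation("nova", "locale/", languages=["ru"])
    meta = t.info()  # parsed from the empty-msgid header entry

    print(meta["project-id-version"])  # nova
    print(meta["content-type"])        # text/plain; charset=UTF-8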
+ +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Команда: %s\n" +"Код завершения: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Необработанное исключение" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Объявление очереди %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Объявление точки обмена %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Получение из %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "AMQP сервер %s:%d недоступен. Повторная попытка через %d секунд." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "Не удалось подключиться к серверу AMQP после %d попыток. Выключение." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Переподключено к очереди" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Не удалось получить сообщение из очереди" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "получено %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "не определен метод для сообщения: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Не определен метод для сообщения: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Выполняется асинхронный вызов..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "ответ %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "тема %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "сообщение %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Запускается узел %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "Объект сервиса в базе данных отсутствует, повторное создание." + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "Хранилище данных %s недоступно. Повторная попытка через %d секунд." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist.
Daemon not running?\n" +msgstr "pidfile %s не обнаружен. Демон не запущен?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Запускается %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Вложенное исключение: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "Класс %s не найден" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "Результат %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "Выполняется %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "Не удалось получить IP, используем 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "Слишком много неудачных попыток аутентификации." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" +"Ключ доступа %s имеет %d неудачных попыток аутентификации и будет " +"заблокирован на %d минут." + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Ошибка аутентификации: %s" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "Аутентифицированный запрос для %s:%s)" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "действие: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "arg: %s\t\tval: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос."
+ +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Создание нового пользователя: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Удаление пользователя: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Добавление роли %s пользователю %s для проекта %s" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Удаление роли %s пользователя %s для проекта %s" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Создать проект %s под управлением %s" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Удалить проект: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Добавление пользователя %s к проекту %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Удаление пользователя %s из проекта %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Создание пары ключей %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "Удаление пары ключей %s" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "Неверный диапазон портов" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters."
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Это правило уже существует в группе %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "группа %s уже существует" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Создание раздела %s ГБ" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "аттрибут не поддерживается: %s" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "не указан пользователь или группа" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "Пользователь %s уже существует" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "Проект не может быть создан поскольку менеджер %s не существует" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "Проект не может быть созан поскольку проект %s уже существует" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Пользователь \"%s\" не существует" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Проект \"%s\" не найден" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "Объект LDAP %s не существует" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "Проект не может быть создан поскольку пользователь %s не существует" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "Пользователь %s уже член группы %s" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "Пользователь %s не является членом группы %s" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Не допустимая подпись для пользователя %s" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "Подпись не совпадает" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "Необходимо указать проект" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "Роль %s не может быть найдена" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Добавление роли %s для пользователя %s в проект %s" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Удаление роли %s пользователя %s в проекте %s" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Создан проект %s под управлением %s" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "изменение проекта %s" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Удалить пользователя %s из проекта %s" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Удаление проекта %s" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Создан пользователь %s (администратор: %r)" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "Удаление пользователя %s" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "Нет vpn данных для проекта %s" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "Запуск VPN для %s" + +#: nova/compute/api.py:67 
+#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Ошибка монтирования файловой системы: %s" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "обновление %s..." 
+ +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "неожиданная ошибка во время обновления" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "Получение объекта: %s / %s" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "Вставка объекта: %s / %s" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "Удаление объекта: %s / %s" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, 
python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "Удаленное изображение: %s" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "Получено %s" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "Запускается VM %s " + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "Запущен VM %s " + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "Создан диск для %s" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "Нет диска в %s" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a 
device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "%s: _db_content => %s" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "Звонок %s %s" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/po/uk.po b/po/uk.po new file mode 100644 index 000000000..f3e217690 --- /dev/null +++ b/po/uk.po @@ -0,0 +1,2135 @@ +# Ukrainian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-02-03 22:02+0000\n" +"Last-Translator: Wladimir Rossinski <Unknown>\n" +"Language-Team: Ukrainian <uk@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Ім'я файлу секретного ключа" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Шлях до збережених ключів" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Неочікувана помилка при виконанні команди." 
+ +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Команда: %s\n" +"Код завершення: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Необроблене виключення" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Оголошення черги %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Оголошення точки обміну %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Отримання з %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "AMQP сервер %s:%d недоступний. Спроба під'єднання через %d секунд." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "Не вдалось під'єднатися до серверу AMQP після %d спроб. Вимкнення." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Оновлено з'єднання до черги" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "отримано %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "немає методу для повідомлення: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Немає методу для повідомлення: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Створення асинхронного виклику..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "відповідь %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "тема %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "повідомлення %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Обслуговування %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Запускається %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "Запускається %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "Не вдалось отримати IP, використовуючи 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "Занадто багато невдалих аутентифікацій." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Вилучити проект: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Долучення користувача %s до проекту %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Вилучення користувача %s з проекту %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s не допустимий ipProtocol" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "Невірний діапазон портів" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Це правило вже існує в групі %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Вилучити групу безпеки %s" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Створити розділ на %s ГБ" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "Від'єднати том %s" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "лише група \"всі\" підтримується" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "Користувач %s вже існує" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Користувач \"%s\" не знайдено" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Проект \"%s\" не знайдено" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/po/zh_CN.po b/po/zh_CN.po new file mode 100644 index 000000000..a39383497 --- /dev/null +++ b/po/zh_CN.po @@ -0,0 +1,2135 @@ +# Chinese (Simplified) translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-02-14 02:26+0000\n" +"Last-Translator: Winston Dillon <Unknown>\n" +"Language-Team: Chinese (Simplified) <zh_CN@li.org>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-15 05:12+0000\n" +"X-Generator: Launchpad (build 12351)\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "正在启动 %s" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "根证书文件名" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "私钥文件名" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "保存密钥的位置" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "保存根证书的位置" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "是否所有项目都是用证书授权(CA)?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "用户证书的标题,%s依次分别为项目,用户,时间戳" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "项目证书的标题,%s依次分别为项目,时间戳" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "VPN证书的标题,%s依次分别为项目,时间戳" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Flag所在路径:%s" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "运行命令时出现了意外错误。" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"命令:%s\n" +"退出代码:%s\n" +"标准输出(stdout):%r\n" +"标准错误(stderr):%r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "未捕获异常" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s)发布(键值:%s)%s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "发布并路由到 %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "正在声明队列%s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "正在声明交换(exchange)%s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "将%s绑定到%s(以%s键值)" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "从%s获得如下内容:%s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "位于%s:%d的AMQP服务器不可用。%d秒后重试。" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "已尝试%d次,均无法连接到AMQP服务器。关闭中。" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "重新与队列建立连接" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "从队列获取数据失败" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "已接收 %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "没有适用于消息%s的方法" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "没有适用于消息%s的方法" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "返回%s异常给调用者" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "产生异步调用中……" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "消息ID(MSG_ID)是 %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "回复 %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "话题是 %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "消息 %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "启动%s节点" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "因无数据库记录,服务已被中止" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "与模型服务器(model server)的连接已恢复!" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "失去与模型服务器的连接" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "数据储存服务%s不可用。%d秒之后继续尝试。" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "正在为%s服务" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "FLAGS全集:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "pidfile %s不存在。后台服务没有运行?\n" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "内层异常:%s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "无法找到%s类" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "正在抓取%s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "正在子进程中运行命令:%s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "运行结果为 %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "回调中debug:%s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "正在运行 %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "不能获取IP,将使用 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "无效的后端:%s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "后端 %s" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "认证失败次数过多。" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "访问密钥%s已有%d次认证失败,将被锁定%d分钟。" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "认证失败:%s" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "对%s:%s的请求已通过认证" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "动作: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "参数: %s\t\t值: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "对控制器=%s及动作=%s的请求未经授权" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "引发了NotFound错误:%s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "引发了ApiError:%s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "引发了意外的错误:%s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "发生了一个未知错误。请重试你的请求。"
+ +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "创建新用户: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "删除用户: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "正将%s角色赋予用户%s(在项目%s中)" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "正将全站角色%s赋予用户%s" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "正将角色%s从用户%s在项目%s中移除" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "操作必须为增加或删除" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "为用户%s从项目%s中获取x509" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "创建项目%s,此项目由%s管理" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "删除项目:%s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "增加用户%s到项目%s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "正将用户%s从项目%s中移除" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "不支持的API请求: 控制器 = %s,动作 = %s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "生成根证书: %s" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "创建密钥对 %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "删除密钥对 %s" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s是无效的IP协议" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "端口范围无效" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "撤销安全组入口规则 %s" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters."
+msgstr "对给定的参数无特定规则。" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "验证输入安全组 %s" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "这条规则已经存在安全组%s中。" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "创建安全组%s" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "安全组%s已经存在" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "删除安全组 %s" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/run_tests.py b/run_tests.py index 24786e8ad..3c8d410e1 100644 --- a/run_tests.py +++ b/run_tests.py @@ -17,26 +17,245 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Colorizer Code is borrowed from Twisted: +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +"""Unittest runner for Nova. + +To run all tests + python run_tests.py + +To run a single test: + python run_tests.py test_compute:ComputeTestCase.test_run_terminate + +To run a single test module: + python run_tests.py test_compute + + or + + python run_tests.py api.test_wsgi + +""" + import gettext import os import unittest import sys from nose import config -from nose import result from nose import core +from nose import result from nova import log as logging +from nova.tests import fake_flags + + +class _AnsiColorizer(object): + """ + A colorizer is an object that loosely wraps around a stream, allowing + callers to write text to the stream in a particular color. + + Colorizer classes must implement C{supported()} and C{write(text, color)}. 
+ """ + _colors = dict(black=30, red=31, green=32, yellow=33, + blue=34, magenta=35, cyan=36, white=37) + + def __init__(self, stream): + self.stream = stream + + def supported(cls, stream=sys.stdout): + """ + A class method that returns True if the current platform supports + coloring terminal output using this method. Returns False otherwise. + """ + if not stream.isatty(): + return False # auto color only on TTYs + try: + import curses + except ImportError: + return False + else: + try: + try: + return curses.tigetnum("colors") > 2 + except curses.error: + curses.setupterm() + return curses.tigetnum("colors") > 2 + except: + raise + # guess false in case of error + return False + supported = classmethod(supported) + + def write(self, text, color): + """ + Write the given text to the stream in the given color. + + @param text: Text to be written to the stream. + + @param color: A string label for a color. e.g. 'red', 'white'. + """ + color = self._colors[color] + self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) + + +class _Win32Colorizer(object): + """ + See _AnsiColorizer docstring. + """ + def __init__(self, stream): + from win32console import GetStdHandle, STD_OUT_HANDLE, \ + FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \ + FOREGROUND_INTENSITY + red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN, + FOREGROUND_BLUE, FOREGROUND_INTENSITY) + self.stream = stream + self.screenBuffer = GetStdHandle(STD_OUT_HANDLE) + self._colors = { + 'normal': red | green | blue, + 'red': red | bold, + 'green': green | bold, + 'blue': blue | bold, + 'yellow': red | green | bold, + 'magenta': red | blue | bold, + 'cyan': green | blue | bold, + 'white': red | green | blue | bold + } + + def supported(cls, stream=sys.stdout): + try: + import win32console + screenBuffer = win32console.GetStdHandle( + win32console.STD_OUT_HANDLE) + except ImportError: + return False + import pywintypes + try: + screenBuffer.SetConsoleTextAttribute( + win32console.FOREGROUND_RED | + win32console.FOREGROUND_GREEN | + win32console.FOREGROUND_BLUE) + except pywintypes.error: + return False + else: + return True + supported = classmethod(supported) + + def write(self, text, color): + color = self._colors[color] + self.screenBuffer.SetConsoleTextAttribute(color) + self.stream.write(text) + self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) + + +class _NullColorizer(object): + """ + See _AnsiColorizer docstring. 
+ """ + def __init__(self, stream): + self.stream = stream + + def supported(cls, stream=sys.stdout): + return True + supported = classmethod(supported) + + def write(self, text, color): + self.stream.write(text) class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): result.TextTestResult.__init__(self, *args, **kw) self._last_case = None + self.colorizer = None + # NOTE(vish): reset stdout for the terminal check + stdout = sys.stdout + sys.stdout = sys.__stdout__ + for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: + if colorizer.supported(): + self.colorizer = colorizer(self.stream) + break + sys.stdout = stdout def getDescription(self, test): return str(test) + # NOTE(vish): copied from unittest with edit to add color + def addSuccess(self, test): + unittest.TestResult.addSuccess(self, test) + if self.showAll: + self.colorizer.write("OK", 'green') + self.stream.writeln() + elif self.dots: + self.stream.write('.') + self.stream.flush() + + # NOTE(vish): copied from unittest with edit to add color + def addFailure(self, test, err): + unittest.TestResult.addFailure(self, test, err) + if self.showAll: + self.colorizer.write("FAIL", 'red') + self.stream.writeln() + elif self.dots: + self.stream.write('F') + self.stream.flush() + + # NOTE(vish): copied from nose with edit to add color + def addError(self, test, err): + """Overrides normal addError to add support for + errorClasses. If the exception is a registered class, the + error will be added to the list for that class, not errors. + """ + stream = getattr(self, 'stream', None) + ec, ev, tb = err + try: + exc_info = self._exc_info_to_string(err, test) + except TypeError: + # 2.3 compat + exc_info = self._exc_info_to_string(err) + for cls, (storage, label, isfail) in self.errorClasses.items(): + if result.isclass(ec) and issubclass(ec, cls): + if isfail: + test.passed = False + storage.append((test, exc_info)) + # Might get patched into a streamless result + if stream is not None: + if self.showAll: + message = [label] + detail = result._exception_detail(err[1]) + if detail: + message.append(detail) + stream.writeln(": ".join(message)) + elif self.dots: + stream.write(label[:1]) + return + self.errors.append((test, exc_info)) + test.passed = False + if stream is not None: + if self.showAll: + self.colorizer.write("ERROR", 'red') + self.stream.writeln() + elif self.dots: + stream.write('E') + def startTest(self, test): unittest.TestResult.startTest(self, test) current_case = test.test.__class__.__name__ @@ -60,13 +279,24 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': - logging.basicConfig() + logging.setup() + # If any argument looks like a test name but doesn't have "nova.tests" in + # front of it, automatically add that so we don't have to type as much + argv = [] + for x in sys.argv: + if x.startswith('test_'): + argv.append('nova.tests.%s' % x) + else: + argv.append(x) + + testdir = os.path.abspath(os.path.join("nova", "tests")) c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, + workingDir=testdir, plugins=core.DefaultPluginManager()) runner = NovaTestRunner(stream=c.stream, verbosity=c.verbosity, config=c) - sys.exit(not core.run(config=c, testRunner=runner)) + sys.exit(not core.run(config=c, testRunner=runner, argv=argv)) diff --git a/run_tests.sh b/run_tests.sh index 4e21fe945..8f4d37cd4 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -39,8 +39,18 @@ done function run_tests { # Just run the test suites in current environment - ${wrapper} rm -f 
-  ${wrapper} rm -f nova.sqlite
-  ${wrapper} $NOSETESTS 2> run_tests.err.log
+  ${wrapper} $NOSETESTS 2> run_tests.log
+  # If we get some short import error right away, print the error log directly
+  RESULT=$?
+  if [ "$RESULT" -ne "0" ];
+  then
+    ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
+    if [ "$ERRSIZE" -lt "40" ];
+    then
+      cat run_tests.log
+    fi
+  fi
+  return $RESULT
 }
 
 NOSETESTS="python run_tests.py $noseargs"
@@ -73,7 +83,9 @@ fi
 
 if [ -z "$noseargs" ];
 then
-  run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1
+  srcfiles=`find bin -type f ! -name "nova.conf*"`
+  srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
+  run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1
 else
   run_tests
 fi
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,10 +18,21 @@
 
 import os
 import subprocess
+import sys
 
-from setuptools import setup, find_packages
+from setuptools import find_packages
 from setuptools.command.sdist import sdist
 
+try:
+    import DistUtilsExtra.auto
+except ImportError:
+    print >> sys.stderr, 'To build nova you need '\
+                         'https://launchpad.net/python-distutils-extra'
+    sys.exit(1)
+assert DistUtilsExtra.auto.__version__ >= '2.18',\
+        'needs DistUtilsExtra.auto >= 2.18'
+
+
 from nova.utils import parse_mailmap, str_dict_replace
 from nova import version
@@ -75,7 +86,7 @@ try:
 except:
     pass
 
-setup(name='nova',
+DistUtilsExtra.auto.setup(name='nova',
       version=version.canonical_version_string(),
       description='cloud computing fabric controller',
       author='OpenStack',
@@ -85,9 +96,12 @@ setup(name='nova',
       packages=find_packages(exclude=['bin', 'smoketests']),
       include_package_data=True,
       test_suite='nose.collector',
-      scripts=['bin/nova-api',
+      scripts=['bin/nova-ajax-console-proxy',
+               'bin/nova-api',
                'bin/nova-compute',
+               'bin/nova-console',
                'bin/nova-dhcpbridge',
+               'bin/nova-direct-api',
                'bin/nova-import-canonical-imagestore',
                'bin/nova-instancemonitor',
                'bin/nova-logspool',
@@ -96,5 +110,6 @@ setup(name='nova',
                'bin/nova-objectstore',
                'bin/nova-scheduler',
                'bin/nova-spoolsentry',
+               'bin/stack',
                'bin/nova-volume',
                'tools/nova-debug'])
diff --git a/smoketests/base.py b/smoketests/base.py
index 610270c5c..204b4a1eb 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -17,19 +17,21 @@
 # under the License.
 
 import boto
-import boto_v6
 import commands
 import httplib
 import os
 import paramiko
-import random
 import sys
+import time
 import unittest
 from boto.ec2.regioninfo import RegionInfo
 
 from smoketests import flags
 
+SUITE_NAMES = '[image, instance, volume]'
 FLAGS = flags.FLAGS
+flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
+boto_v6 = None
 
 
 class SmokeTestCase(unittest.TestCase):
@@ -39,12 +41,10 @@ class SmokeTestCase(unittest.TestCase):
         client = paramiko.SSHClient()
         client.set_missing_host_key_policy(paramiko.WarningPolicy())
         client.connect(ip, username='root', pkey=key)
-        stdin, stdout, stderr = client.exec_command('uptime')
-        print 'uptime: ', stdout.read()
         return client
 
-    def can_ping(self, ip):
-        """ Attempt to ping the specified IP, and give up after 1 second. """
+    def can_ping(self, ip, command="ping"):
+        """Attempt to ping the specified IP, and give up after 1 second."""
         # NOTE(devcamcar): ping timeout flag is different in OSX.
         if sys.platform == 'darwin':
@@ -52,10 +52,41 @@ class SmokeTestCase(unittest.TestCase):
         else:
             timeout_flag = 'w'
 
-        status, output = commands.getstatusoutput('ping -c1 -%s1 %s' %
-                                                  (timeout_flag, ip))
+        status, output = commands.getstatusoutput('%s -c1 -%s1 %s' %
+                                                  (command, timeout_flag, ip))
         return status == 0
 
+    def wait_for_running(self, instance, tries=60, wait=1):
+        """Wait for instance to be running"""
+        for x in xrange(tries):
+            instance.update()
+            if instance.state.startswith('running'):
+                return True
+            time.sleep(wait)
+        else:
+            return False
+
+    def wait_for_ping(self, ip, command="ping", tries=120):
+        """Wait for ip to be pingable"""
+        for x in xrange(tries):
+            if self.can_ping(ip, command):
+                return True
+        else:
+            return False
+
+    def wait_for_ssh(self, ip, key_name, tries=30, wait=5):
+        """Wait for ip to be sshable"""
+        for x in xrange(tries):
+            try:
+                conn = self.connect_ssh(ip, key_name)
+                conn.close()
+            except Exception, e:
+                time.sleep(wait)
+            else:
+                return True
+        else:
+            return False
+
     def connection_for_env(self, **kwargs):
         """
         Returns a boto ec2 connection for the current environment.
@@ -144,8 +175,21 @@ class SmokeTestCase(unittest.TestCase):
         return True
 
 
+TEST_DATA = {}
+
+
+class UserSmokeTestCase(SmokeTestCase):
+    def setUp(self):
+        global TEST_DATA
+        self.conn = self.connection_for_env()
+        self.data = TEST_DATA
+
+
 def run_tests(suites):
     argv = FLAGS(sys.argv)
+    if FLAGS.use_ipv6:
+        global boto_v6
+        boto_v6 = __import__('boto_v6')
 
     if not os.getenv('EC2_ACCESS_KEY'):
         print >> sys.stderr, 'Missing EC2 environment variables. Please ' \
diff --git a/smoketests/flags.py b/smoketests/flags.py
index 35f432a77..5f3c8505e 100644
--- a/smoketests/flags.py
+++ b/smoketests/flags.py
@@ -35,5 +35,5 @@ DEFINE_bool = DEFINE_bool
 # http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
 
 DEFINE_string('region', 'nova', 'Region to use')
-DEFINE_string('test_image', 'ami-tiny', 'Image to use for launch tests')
-DEFINE_string('use_ipv6', True, 'use the ipv6 or not')
+DEFINE_string('test_image', 'ami-tty', 'Image to use for launch tests')
+DEFINE_bool('use_ipv6', False, 'use the ipv6 or not')
diff --git a/smoketests/netadmin_smoketests.py b/smoketests/netadmin_smoketests.py
new file mode 100644
index 000000000..38beb8fdc
--- /dev/null
+++ b/smoketests/netadmin_smoketests.py
@@ -0,0 +1,194 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import commands
+import os
+import random
+import sys
+import time
+import unittest
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from smoketests import flags
+from smoketests import base
+
+
+FLAGS = flags.FLAGS
+
+TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
+TEST_BUCKET = '%s_bucket' % TEST_PREFIX
+TEST_KEY = '%s_key' % TEST_PREFIX
+TEST_GROUP = '%s_group' % TEST_PREFIX
+
+
+class AddressTests(base.UserSmokeTestCase):
+    def test_000_setUp(self):
+        self.create_key_pair(self.conn, TEST_KEY)
+        reservation = self.conn.run_instances(FLAGS.test_image,
+                                              instance_type='m1.tiny',
+                                              key_name=TEST_KEY)
+        self.data['instance'] = reservation.instances[0]
+        if not self.wait_for_running(self.data['instance']):
+            self.fail('instance failed to start')
+        self.data['instance'].update()
+        if not self.wait_for_ping(self.data['instance'].private_dns_name):
+            self.fail('could not ping instance')
+        if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+                                 TEST_KEY):
+            self.fail('could not ssh to instance')
+
+    def test_001_can_allocate_floating_ip(self):
+        result = self.conn.allocate_address()
+        self.assertTrue(hasattr(result, 'public_ip'))
+        self.data['public_ip'] = result.public_ip
+
+    def test_002_can_associate_ip_with_instance(self):
+        result = self.conn.associate_address(self.data['instance'].id,
+                                             self.data['public_ip'])
+        self.assertTrue(result)
+
+    def test_003_can_ssh_with_public_ip(self):
+        ssh_authorized = False
+        groups = self.conn.get_all_security_groups(['default'])
+        for rule in groups[0].rules:
+            if (rule.ip_protocol == 'tcp' and
+                rule.from_port <= 22 and rule.to_port >= 22):
+                ssh_authorized = True
+        if not ssh_authorized:
+            self.conn.authorize_security_group('default',
+                                               ip_protocol='tcp',
+                                               from_port=22,
+                                               to_port=22)
+        try:
+            if not self.wait_for_ssh(self.data['public_ip'], TEST_KEY):
+                self.fail('could not ssh to public ip')
+        finally:
+            if not ssh_authorized:
+                self.conn.revoke_security_group('default',
+                                                ip_protocol='tcp',
+                                                from_port=22,
+                                                to_port=22)
+
+    def test_004_can_disassociate_ip_from_instance(self):
+        result = self.conn.disassociate_address(self.data['public_ip'])
+        self.assertTrue(result)
+
+    def test_005_can_deallocate_floating_ip(self):
+        result = self.conn.release_address(self.data['public_ip'])
+        self.assertTrue(result)
+
+    def test_999_tearDown(self):
+        self.delete_key_pair(self.conn, TEST_KEY)
+        self.conn.terminate_instances([self.data['instance'].id])
+
+
+class SecurityGroupTests(base.UserSmokeTestCase):
+
+    def __public_instance_is_accessible(self):
+        id_url = "latest/meta-data/instance-id"
+        options = "-s --max-time 1"
+        command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
+        instance_id = commands.getoutput(command).strip()
+        if not instance_id:
+            return False
+        if instance_id != self.data['instance'].id:
+            raise Exception("Wrong instance id")
+        return True
+
+    def test_001_can_create_security_group(self):
+        self.conn.create_security_group(TEST_GROUP, description='test')
+
+        groups = self.conn.get_all_security_groups()
+        self.assertTrue(TEST_GROUP in [group.name for group in groups])
+
+    def test_002_can_launch_instance_in_security_group(self):
+        with open("proxy.sh") as f:
+            user_data = f.read()
+        self.create_key_pair(self.conn, TEST_KEY)
+        reservation = self.conn.run_instances(FLAGS.test_image,
+                                              key_name=TEST_KEY,
+                                              security_groups=[TEST_GROUP],
+                                              user_data=user_data,
+                                              instance_type='m1.tiny')
+
+        self.data['instance'] = reservation.instances[0]
+        if not self.wait_for_running(self.data['instance']):
+            self.fail('instance failed to start')
+        self.data['instance'].update()
+        if not self.wait_for_ping(self.data['instance'].private_dns_name):
+            self.fail('could not ping instance')
+        if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+                                 TEST_KEY):
+            self.fail('could not ssh to instance')
+
+    def test_003_can_authorize_security_group_ingress(self):
+        self.assertTrue(self.conn.authorize_security_group(TEST_GROUP,
+                                                           ip_protocol='tcp',
+                                                           from_port=80,
+                                                           to_port=80))
+
+    def test_004_can_access_metadata_over_public_ip(self):
+        result = self.conn.allocate_address()
+        self.assertTrue(hasattr(result, 'public_ip'))
+        self.data['public_ip'] = result.public_ip
+
+        result = self.conn.associate_address(self.data['instance'].id,
+                                             self.data['public_ip'])
+        start_time = time.time()
+        try:
+            while not self.__public_instance_is_accessible():
+                # 1 minute to launch
+                if time.time() - start_time > 60:
+                    raise Exception("Timeout")
+                time.sleep(1)
+        finally:
+            result = self.conn.disassociate_address(self.data['public_ip'])
+
+    def test_005_can_revoke_security_group_ingress(self):
+        self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
+                                                        ip_protocol='tcp',
+                                                        from_port=80,
+                                                        to_port=80))
+        start_time = time.time()
+        while self.__public_instance_is_accessible():
+            # 1 minute to teardown
+            if time.time() - start_time > 60:
+                raise Exception("Timeout")
+            time.sleep(1)
+
+    def test_999_tearDown(self):
+        self.conn.delete_key_pair(TEST_KEY)
+        self.conn.delete_security_group(TEST_GROUP)
+        groups = self.conn.get_all_security_groups()
+        self.assertFalse(TEST_GROUP in [group.name for group in groups])
+        self.conn.terminate_instances([self.data['instance'].id])
+        self.assertTrue(self.conn.release_address(self.data['public_ip']))
+
+
+if __name__ == "__main__":
+    suites = {'address': unittest.makeSuite(AddressTests),
+              'security_group': unittest.makeSuite(SecurityGroupTests)
+              }
+    sys.exit(base.run_tests(suites))
diff --git a/smoketests/proxy.sh b/smoketests/proxy.sh
new file mode 100755
index 000000000..9b3f3108a
--- /dev/null
+++ b/smoketests/proxy.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+# This is a simple shell script that uses netcat to set up a proxy to the
+# metadata server on port 80 and to a google ip on port 8080. This is meant
+# to be passed in by a script to an instance via user data, so that
+# automatic testing of network connectivity can be performed.
+
+# Example usage:
+#   euca-run-instances -t m1.tiny -f proxy.sh ami-tty
+
+mkfifo backpipe1
+mkfifo backpipe2
+
+# NOTE(vish): proxy metadata on port 80
+while true; do
+    nc -l -p 80 0<backpipe1 | nc 169.254.169.254 80 1>backpipe1
+done &
+
+# NOTE(vish): proxy google on port 8080
+while true; do
+    nc -l -p 8080 0<backpipe2 | nc 74.125.19.99 80 1>backpipe2
+done &
diff --git a/smoketests/public_network_smoketests.py b/smoketests/public_network_smoketests.py
index bfc2b20ba..5a4c67642 100644
--- a/smoketests/public_network_smoketests.py
+++ b/smoketests/public_network_smoketests.py
@@ -24,9 +24,16 @@ import sys
 import time
 import unittest
 
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
 from smoketests import flags
 from smoketests import base
-from smoketests import user_smoketests
 
 #Note that this test should run from
 #public network (outside of private network segments)
@@ -42,7 +49,7 @@ TEST_KEY2 = '%s_key2' % TEST_PREFIX
 
 TEST_DATA = {}
 
 
-class InstanceTestsFromPublic(user_smoketests.UserSmokeTestCase):
+class InstanceTestsFromPublic(base.UserSmokeTestCase):
     def test_001_can_create_keypair(self):
         key = self.create_key_pair(self.conn, TEST_KEY)
         self.assertEqual(key.name, TEST_KEY)
diff --git a/smoketests/user_smoketests.py b/smoketests/sysadmin_smoketests.py
index d5a3a7556..e3b84d3d3 100644
--- a/smoketests/user_smoketests.py
+++ b/smoketests/sysadmin_smoketests.py
@@ -19,7 +19,6 @@
 import commands
 import os
 import random
-import socket
 import sys
 import time
 import unittest
@@ -36,10 +35,8 @@
 from smoketests import flags
 from smoketests import base
 
-SUITE_NAMES = '[image, instance, volume]'
 FLAGS = flags.FLAGS
-flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
 flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
                     'Local kernel file to use for bundling tests')
 flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
@@ -49,17 +46,7 @@ TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
 TEST_BUCKET = '%s_bucket' % TEST_PREFIX
 TEST_KEY = '%s_key' % TEST_PREFIX
 TEST_GROUP = '%s_group' % TEST_PREFIX
-TEST_DATA = {}
-
-
-class UserSmokeTestCase(base.SmokeTestCase):
-    def setUp(self):
-        global TEST_DATA
-        self.conn = self.connection_for_env()
-        self.data = TEST_DATA
-
-
-class ImageTests(UserSmokeTestCase):
+class ImageTests(base.UserSmokeTestCase):
     def test_001_can_bundle_image(self):
         self.assertTrue(self.bundle_image(FLAGS.bundle_image))
@@ -91,7 +78,6 @@ class ImageTests(UserSmokeTestCase):
                 break
             time.sleep(1)
         else:
-            print image.state
             self.assert_(False)  # wasn't available within 10 seconds
         self.assert_(image.type == 'machine')
@@ -133,7 +119,7 @@ class ImageTests(UserSmokeTestCase):
         self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET))
 
 
-class InstanceTests(UserSmokeTestCase):
+class InstanceTests(base.UserSmokeTestCase):
     def test_001_can_create_keypair(self):
         key = self.create_key_pair(self.conn, TEST_KEY)
         self.assertEqual(key.name, TEST_KEY)
@@ -143,109 +129,44 @@
                                               key_name=TEST_KEY,
                                               instance_type='m1.tiny')
         self.assertEqual(len(reservation.instances), 1)
-        self.data['instance_id'] = reservation.instances[0].id
+        self.data['instance'] = reservation.instances[0]
 
     def test_003_instance_runs_within_60_seconds(self):
-        reservations = self.conn.get_all_instances([self.data['instance_id']])
-        instance = reservations[0].instances[0]
+        instance = self.data['instance']
         # allow 60 seconds to exit pending with IP
-        for x in xrange(60):
-            instance.update()
-            if instance.state == u'running':
-                break
-            time.sleep(1)
-        else:
+        if not self.wait_for_running(self.data['instance']):
             self.fail('instance failed to start')
-        ip = reservations[0].instances[0].private_dns_name
+        self.data['instance'].update()
+        ip = self.data['instance'].private_dns_name
         self.failIf(ip == '0.0.0.0')
-        self.data['private_ip'] = ip
         if FLAGS.use_ipv6:
-            ipv6 = reservations[0].instances[0].dns_name_v6
+            ipv6 = self.data['instance'].dns_name_v6
             self.failIf(ipv6 is None)
-            self.data['ip_v6'] = ipv6
 
     def test_004_can_ping_private_ip(self):
-        for x in xrange(120):
-            # ping waits for 1 second
-            status, output = commands.getstatusoutput(
-                'ping -c1 %s' % self.data['private_ip'])
-            if status == 0:
-                break
-        else:
+        if not self.wait_for_ping(self.data['instance'].private_dns_name):
             self.fail('could not ping instance')
 
         if FLAGS.use_ipv6:
-            for x in xrange(120):
-                # ping waits for 1 second
-                status, output = commands.getstatusoutput(
-                    'ping6 -c1 %s' % self.data['ip_v6'])
-                if status == 0:
-                    break
-            else:
-                self.fail('could not ping instance')
+            if not self.wait_for_ping(self.data['instance'].ip_v6, "ping6"):
+                self.fail('could not ping instance v6')
 
     def test_005_can_ssh_to_private_ip(self):
-        for x in xrange(30):
-            try:
-                conn = self.connect_ssh(self.data['private_ip'], TEST_KEY)
-                conn.close()
-            except Exception:
-                time.sleep(1)
-            else:
-                break
-        else:
+        if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+                                 TEST_KEY):
             self.fail('could not ssh to instance')
 
         if FLAGS.use_ipv6:
-            for x in xrange(30):
-                try:
-                    conn = self.connect_ssh(
-                        self.data['ip_v6'], TEST_KEY)
-                    conn.close()
-                except Exception:
-                    time.sleep(1)
-                else:
-                    break
-            else:
+            if not self.wait_for_ssh(self.data['instance'].ip_v6,
+                                     TEST_KEY):
                 self.fail('could not ssh to instance v6')
 
-    def test_006_can_allocate_elastic_ip(self):
-        result = self.conn.allocate_address()
-        self.assertTrue(hasattr(result, 'public_ip'))
-        self.data['public_ip'] = result.public_ip
-
-    def test_007_can_associate_ip_with_instance(self):
-        result = self.conn.associate_address(self.data['instance_id'],
-                                             self.data['public_ip'])
-        self.assertTrue(result)
-
-    def test_008_can_ssh_with_public_ip(self):
-        for x in xrange(30):
-            try:
-                conn = self.connect_ssh(self.data['public_ip'], TEST_KEY)
-                conn.close()
-            except socket.error:
-                time.sleep(1)
-            else:
-                break
-        else:
-            self.fail('could not ssh to instance')
-
-    def test_009_can_disassociate_ip_from_instance(self):
-        result = self.conn.disassociate_address(self.data['public_ip'])
-        self.assertTrue(result)
-
-    def test_010_can_deallocate_elastic_ip(self):
-        result = self.conn.release_address(self.data['public_ip'])
-        self.assertTrue(result)
-
     def test_999_tearDown(self):
         self.delete_key_pair(self.conn, TEST_KEY)
-        if self.data.has_key('instance_id'):
-            self.conn.terminate_instances([self.data['instance_id']])
+        self.conn.terminate_instances([self.data['instance'].id])
 
 
-class VolumeTests(UserSmokeTestCase):
+class VolumeTests(base.UserSmokeTestCase):
     def setUp(self):
         super(VolumeTests, self).setUp()
         self.device = '/dev/vdb'
@@ -255,55 +176,65 @@
         reservation = self.conn.run_instances(FLAGS.test_image,
                                               instance_type='m1.tiny',
                                               key_name=TEST_KEY)
-        instance = reservation.instances[0]
-        self.data['instance'] = instance
-        for x in xrange(120):
-            if self.can_ping(instance.private_dns_name):
-                break
-        else:
-            self.fail('unable to start instance')
+        self.data['instance'] = reservation.instances[0]
+        if not self.wait_for_running(self.data['instance']):
+            self.fail('instance failed to start')
+        self.data['instance'].update()
+        if not self.wait_for_ping(self.data['instance'].private_dns_name):
+            self.fail('could not ping instance')
+        if not self.wait_for_ssh(self.data['instance'].private_dns_name,
+                                 TEST_KEY):
+            self.fail('could not ssh to instance')
 
     def test_001_can_create_volume(self):
         volume = self.conn.create_volume(1, 'nova')
         self.assertEqual(volume.size, 1)
         self.data['volume'] = volume
         # Give network time to find volume.
-        time.sleep(5)
+        time.sleep(10)
 
     def test_002_can_attach_volume(self):
         volume = self.data['volume']
         for x in xrange(10):
-            if volume.status == u'available':
-                break
-            time.sleep(5)
             volume.update()
+            if volume.status.startswith('available'):
+                break
+            time.sleep(1)
         else:
             self.fail('cannot attach volume with state %s' % volume.status)
 
         volume.attach(self.data['instance'].id, self.device)
-        # Volumes seems to report "available" too soon.
+        # wait
         for x in xrange(10):
-            if volume.status == u'in-use':
-                break
-            time.sleep(5)
             volume.update()
+            if volume.status.startswith('in-use'):
+                break
+            time.sleep(1)
+        else:
+            self.fail('volume never got to in use')
 
-        self.assertEqual(volume.status, u'in-use')
+        self.assertTrue(volume.status.startswith('in-use'))
 
         # Give instance time to recognize volume.
-        time.sleep(5)
+        time.sleep(10)
 
     def test_003_can_mount_volume(self):
         ip = self.data['instance'].private_dns_name
         conn = self.connect_ssh(ip, TEST_KEY)
-        commands = []
-        commands.append('mkdir -p /mnt/vol')
-        commands.append('mkfs.ext2 %s' % self.device)
-        commands.append('mount %s /mnt/vol' % self.device)
-        commands.append('echo success')
-        stdin, stdout, stderr = conn.exec_command(' && '.join(commands))
+        # NOTE(vish): this will create a dev for images that don't have
+        #             udev rules
+        stdin, stdout, stderr = conn.exec_command(
+            'grep %s /proc/partitions | '
+            '`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`'
+            % self.device.rpartition('/')[2])
+        exec_list = []
+        exec_list.append('mkdir -p /mnt/vol')
+        exec_list.append('/sbin/mke2fs %s' % self.device)
+        exec_list.append('mount %s /mnt/vol' % self.device)
+        exec_list.append('echo success')
+        stdin, stdout, stderr = conn.exec_command(' && '.join(exec_list))
         out = stdout.read()
         conn.close()
         if not out.strip().endswith('success'):
@@ -327,7 +258,7 @@
             "df -h | grep %s | awk {'print $2'}" % self.device)
         out = stdout.read()
         conn.close()
-        if not out.strip() == '1008M':
+        if not out.strip() == '1007.9M':
             self.fail('Volume is not the right size: %s %s' %
                       (out, stderr.read()))
@@ -354,79 +285,9 @@
         self.conn.delete_key_pair(TEST_KEY)
 
 
-class SecurityGroupTests(UserSmokeTestCase):
-
-    def __public_instance_is_accessible(self):
-        id_url = "latest/meta-data/instance-id"
-        options = "-s --max-time 1"
-        command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
-        instance_id = commands.getoutput(command).strip()
-        if not instance_id:
-            return False
-        if instance_id != self.data['instance_id']:
-            raise Exception("Wrong instance id")
-        return True
-
-    def test_001_can_create_security_group(self):
-        self.conn.create_security_group(TEST_GROUP, description='test')
-
-        groups = self.conn.get_all_security_groups()
-        self.assertTrue(TEST_GROUP in [group.name for group in groups])
-
-    def test_002_can_launch_instance_in_security_group(self):
-        self.create_key_pair(self.conn, TEST_KEY)
-        reservation = self.conn.run_instances(FLAGS.test_image,
-                                              key_name=TEST_KEY,
-                                              security_groups=[TEST_GROUP],
-                                              instance_type='m1.tiny')
-
-        self.data['instance_id'] = reservation.instances[0].id
-
-    def test_003_can_authorize_security_group_ingress(self):
-        self.assertTrue(self.conn.authorize_security_group(TEST_GROUP,
-                                                           ip_protocol='tcp',
-                                                           from_port=80,
-                                                           to_port=80))
-
-    def test_004_can_access_instance_over_public_ip(self):
-        result = self.conn.allocate_address()
-        self.assertTrue(hasattr(result, 'public_ip'))
-        self.data['public_ip'] = result.public_ip
-
-        result = self.conn.associate_address(self.data['instance_id'],
-                                             self.data['public_ip'])
-        start_time = time.time()
-        while not self.__public_instance_is_accessible():
-            # 1 minute to launch
-            if time.time() - start_time > 60:
-                raise Exception("Timeout")
-            time.sleep(1)
-
-    def test_005_can_revoke_security_group_ingress(self):
-        self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
-                                                        ip_protocol='tcp',
-                                                        from_port=80,
-                                                        to_port=80))
-        start_time = time.time()
-        while self.__public_instance_is_accessible():
-            # 1 minute to teardown
-            if time.time() - start_time > 60:
-                raise Exception("Timeout")
-            time.sleep(1)
-
-    def test_999_tearDown(self):
-        self.conn.delete_key_pair(TEST_KEY)
-        self.conn.delete_security_group(TEST_GROUP)
-        groups = self.conn.get_all_security_groups()
-        self.assertFalse(TEST_GROUP in [group.name for group in groups])
-        self.conn.terminate_instances([self.data['instance_id']])
-        self.assertTrue(self.conn.release_address(self.data['public_ip']))
-
-
 if __name__ == "__main__":
     suites = {'image': unittest.makeSuite(ImageTests),
               'instance': unittest.makeSuite(InstanceTests),
-              'security_group': unittest.makeSuite(SecurityGroupTests),
               'volume': unittest.makeSuite(VolumeTests)
               }
     sys.exit(base.run_tests(suites))
diff --git a/tools/euca-get-ajax-console b/tools/euca-get-ajax-console
index 37060e74f..e407dd566 100755
--- a/tools/euca-get-ajax-console
+++ b/tools/euca-get-ajax-console
@@ -35,7 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
 
 import boto
 import nova
 from boto.ec2.connection import EC2Connection
-from euca2ools import Euca2ool, InstanceValidationError, Util, ConnectionFailed
+from euca2ools import Euca2ool, InstanceValidationError, Util
 
 usage_string = """
 Retrieves a url to an ajax console terminal
@@ -147,7 +147,7 @@ def main():
 
     try:
         euca_conn = euca.make_connection()
-    except ConnectionFailed, e:
+    except Exception, e:
         print e.message
         sys.exit(1)
     try:
diff --git a/tools/pip-requires b/tools/pip-requires
index 3587df644..3c9047e04 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -10,6 +10,7 @@ boto==1.9b
 carrot==0.10.5
 eventlet==0.9.12
 lockfile==0.8
+python-novaclient==2.3
 python-daemon==1.5.5
 python-gflags==1.3
 redis==2.0.0
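The thread running through the smoketest changes above is the replacement of hand-rolled retry loops with the shared wait_for_running, wait_for_ping and wait_for_ssh helpers on the base test case: poll a condition a bounded number of times, sleep between attempts, and report success or timeout instead of hanging. A minimal standalone sketch of that polling pattern, in the same Python 2 style as the suite (the generic wait_for name and the example predicate are illustrative, not part of the Nova code above):

import time


def wait_for(predicate, tries=60, wait=1):
    # Poll predicate() up to `tries` times, sleeping `wait` seconds
    # between attempts; succeed as soon as it returns True, and give
    # the caller a False to turn into a test failure on timeout.
    for _ in xrange(tries):
        if predicate():
            return True
        time.sleep(wait)
    return False


# Hypothetical usage, mirroring how the tests above call their helpers:
#     if not wait_for(lambda: instance.state.startswith('running')):
#         self.fail('instance failed to start')

The helpers in smoketests/base.py inline one such loop per condition (instance state, ping, ssh) rather than taking a callable, which keeps the individual test methods down to a single guarded call.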