From 9bd72f56224a8cc980620b17210d9b9b9ede6166 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 18 Jan 2011 18:33:04 -0800 Subject: various fixes to smoketests, including allowing admin tests to run as a user, better timing, and allowing volume tests to run on non-udev linux --- smoketests/flags.py | 2 +- smoketests/user_smoketests.py | 28 ++++++++++++++++++++-------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/smoketests/flags.py b/smoketests/flags.py index 35f432a77..9dc310692 100644 --- a/smoketests/flags.py +++ b/smoketests/flags.py @@ -35,5 +35,5 @@ DEFINE_bool = DEFINE_bool # http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39 DEFINE_string('region', 'nova', 'Region to use') -DEFINE_string('test_image', 'ami-tiny', 'Image to use for launch tests') +DEFINE_string('test_image', 'ami-tty', 'Image to use for launch tests') DEFINE_string('use_ipv6', True, 'use the ipv6 or not') diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py index d5a3a7556..f73ab7e1c 100644 --- a/smoketests/user_smoketests.py +++ b/smoketests/user_smoketests.py @@ -258,10 +258,15 @@ class VolumeTests(UserSmokeTestCase): instance = reservation.instances[0] self.data['instance'] = instance for x in xrange(120): - if self.can_ping(instance.private_dns_name): + time.sleep(1) + instance.update() + #if self.can_ping(instance.private_dns_name): + if instance.state == u'running': break else: self.fail('unable to start instance') + time.sleep(10) + instance.update() def test_001_can_create_volume(self): volume = self.conn.create_volume(1, 'nova') @@ -273,10 +278,11 @@ class VolumeTests(UserSmokeTestCase): def test_002_can_attach_volume(self): volume = self.data['volume'] - for x in xrange(10): - if volume.status == u'available': + for x in xrange(30): + print volume.status + if volume.status.startswith('available'): break - time.sleep(5) + time.sleep(1) volume.update() else: self.fail('cannot attach volume with state %s' % 
volume.status) @@ -285,12 +291,12 @@ class VolumeTests(UserSmokeTestCase): # Volumes seems to report "available" too soon. for x in xrange(10): - if volume.status == u'in-use': + if volume.status.startswith('in-use'): break time.sleep(5) volume.update() - self.assertEqual(volume.status, u'in-use') + self.assertTrue(volume.status.startswith('in-use')) # Give instance time to recognize volume. time.sleep(5) @@ -298,9 +304,15 @@ class VolumeTests(UserSmokeTestCase): def test_003_can_mount_volume(self): ip = self.data['instance'].private_dns_name conn = self.connect_ssh(ip, TEST_KEY) + # NOTE(vish): this will create an dev for images that don't have + # udev rules + stdin, stdout, stderr = conn.exec_command( + 'grep %s /proc/partitions | ' + '`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`' + % self.device.rpartition('/')[2]) commands = [] commands.append('mkdir -p /mnt/vol') - commands.append('mkfs.ext2 %s' % self.device) + commands.append('/sbin/mke2fs %s' % self.device) commands.append('mount %s /mnt/vol' % self.device) commands.append('echo success') stdin, stdout, stderr = conn.exec_command(' && '.join(commands)) @@ -327,7 +339,7 @@ class VolumeTests(UserSmokeTestCase): "df -h | grep %s | awk {'print $2'}" % self.device) out = stdout.read() conn.close() - if not out.strip() == '1008M': + if not out.strip() == '1007.9M': self.fail('Volume is not the right size: %s %s' % (out, stderr.read())) -- cgit From 2f4258d99e8d97ec70645cd2df2f4e54dc869e89 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 20 Jan 2011 00:14:42 -0800 Subject: more smoketest fixes --- nova/__init__.py | 2 -- smoketests/base.py | 7 +++++-- smoketests/flags.py | 2 +- smoketests/user_smoketests.py | 26 ++++++++++++++++---------- 4 files changed, 22 insertions(+), 15 deletions(-) diff --git a/nova/__init__.py b/nova/__init__.py index 8745617bc..256db55a9 100644 --- a/nova/__init__.py +++ b/nova/__init__.py @@ -30,5 +30,3 @@ .. moduleauthor:: Manish Singh .. 
moduleauthor:: Andy Smith """ - -from exception import * diff --git a/smoketests/base.py b/smoketests/base.py index 610270c5c..89ee92840 100644 --- a/smoketests/base.py +++ b/smoketests/base.py @@ -17,12 +17,10 @@ # under the License. import boto -import boto_v6 import commands import httplib import os import paramiko -import random import sys import unittest from boto.ec2.regioninfo import RegionInfo @@ -30,6 +28,8 @@ from boto.ec2.regioninfo import RegionInfo from smoketests import flags FLAGS = flags.FLAGS +boto_v6 = None + class SmokeTestCase(unittest.TestCase): @@ -146,6 +146,9 @@ class SmokeTestCase(unittest.TestCase): def run_tests(suites): argv = FLAGS(sys.argv) + if FLAGS.use_ipv6: + global boto_v6 + boto_v6 = __import__('boto_v6') if not os.getenv('EC2_ACCESS_KEY'): print >> sys.stderr, 'Missing EC2 environment variables. Please ' \ diff --git a/smoketests/flags.py b/smoketests/flags.py index 9dc310692..dc756347b 100644 --- a/smoketests/flags.py +++ b/smoketests/flags.py @@ -36,4 +36,4 @@ DEFINE_bool = DEFINE_bool DEFINE_string('region', 'nova', 'Region to use') DEFINE_string('test_image', 'ami-tty', 'Image to use for launch tests') -DEFINE_string('use_ipv6', True, 'use the ipv6 or not') +DEFINE_bool('use_ipv6', True, 'use the ipv6 or not') diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py index f73ab7e1c..e5bc98ede 100644 --- a/smoketests/user_smoketests.py +++ b/smoketests/user_smoketests.py @@ -189,8 +189,8 @@ class InstanceTests(UserSmokeTestCase): try: conn = self.connect_ssh(self.data['private_ip'], TEST_KEY) conn.close() - except Exception: - time.sleep(1) + except Exception, e: + time.sleep(5) else: break else: @@ -224,7 +224,7 @@ class InstanceTests(UserSmokeTestCase): try: conn = self.connect_ssh(self.data['public_ip'], TEST_KEY) conn.close() - except socket.error: + except Exception: time.sleep(1) else: break @@ -256,17 +256,24 @@ class VolumeTests(UserSmokeTestCase): instance_type='m1.tiny', key_name=TEST_KEY) instance 
= reservation.instances[0] - self.data['instance'] = instance for x in xrange(120): time.sleep(1) instance.update() - #if self.can_ping(instance.private_dns_name): - if instance.state == u'running': + if self.can_ping(instance.private_dns_name): break else: self.fail('unable to start instance') - time.sleep(10) - instance.update() + self.data['instance'] = instance + for x in xrange(30): + try: + conn = self.connect_ssh(instance.private_dns_name, TEST_KEY) + conn.close() + except Exception: + time.sleep(5) + else: + break + else: + self.fail('could not ssh to instance') def test_001_can_create_volume(self): volume = self.conn.create_volume(1, 'nova') @@ -279,7 +286,6 @@ class VolumeTests(UserSmokeTestCase): volume = self.data['volume'] for x in xrange(30): - print volume.status if volume.status.startswith('available'): break time.sleep(1) @@ -438,7 +444,7 @@ class SecurityGroupTests(UserSmokeTestCase): if __name__ == "__main__": suites = {'image': unittest.makeSuite(ImageTests), 'instance': unittest.makeSuite(InstanceTests), - 'security_group': unittest.makeSuite(SecurityGroupTests), + #'security_group': unittest.makeSuite(SecurityGroupTests), 'volume': unittest.makeSuite(VolumeTests) } sys.exit(base.run_tests(suites)) -- cgit From 089bdfa8c2f0f116b55c69bbcde6fca6632cb145 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 15:20:23 -0600 Subject: should be writing some kindof network info to the xenstore now, hopefully --- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 6 ++++++ nova/db/sqlalchemy/models.py | 1 + nova/virt/xenapi/vmops.py | 39 +++++++++++++++++++++++++++++++-------- 4 files changed, 43 insertions(+), 8 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index f9d561587..7b37bce2f 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -501,6 +501,11 @@ def network_get(context, network_id): return IMPL.network_get(context, network_id) +def network_get_all(context): + """Get all networks""" + returm IMPL.network_get_all(context) 
+ + # pylint: disable-msg=C0103 def network_get_associated_fixed_ips(context, network_id): """Get all network's ips that have been associated.""" diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b63b84bed..053780158 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1054,6 +1054,12 @@ def network_get(context, network_id, session=None): return result +@require_context +def network_get_all(context): + session = get_session() + return session.query(models.Network).all() + + # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable-msg=C0103 diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index c54ebe3ba..dc476acf4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -369,6 +369,7 @@ class Network(BASE, NovaBase): "vpn_public_port"), {'mysql_engine': 'InnoDB'}) id = Column(Integer, primary_key=True) + label = Column(String(255)) injected = Column(Boolean, default=False) cidr = Column(String(255), unique=True) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6c2fd6a68..882b9d9d6 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -67,11 +67,6 @@ class VMOps(object): raise exception.Duplicate(_('Attempted to create' ' non-unique name %s') % instance.name) - bridge = db.network_get_by_instance(context.get_admin_context(), - instance['id'])['bridge'] - network_ref = \ - NetworkHelper.find_network_with_bridge(self._session, bridge) - user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) #if kernel is not present we must download a raw disk @@ -99,9 +94,29 @@ class VMOps(object): instance, kernel, ramdisk, pv_kernel) VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) - if network_ref: - VMHelper.create_vif(self._session, vm_ref, - network_ref, instance.mac_address) + # write network info + 
network = db.network_get_by_instance(context.get_admin_context(), + instance['id']) + for network in db.network_get_all(): + mapping = {'label': network['label'], + 'gateway': network['gateway'], + 'mac': instance.mac_address, + 'dns': network['dns'], + 'ips': [{'netmask': network['netmask'], + 'enabled': '1', + 'ip': 192.168.3.3}]} # <===== CHANGE!!!! + self.write_network_config_to_xenstore(vm_ref, mapping) + + bridge = network['bridge'] + network_ref = \ + NetworkHelper.find_network_with_bridge(self._session, bridge) + + if network_ref: + VMHelper.create_vif(self._session, vm_ref, + network_ref, instance.mac_address) + + # call reset networking + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref) @@ -341,6 +356,14 @@ class VMOps(object): # TODO: implement this! return 'http://fakeajaxconsole/fake_url' + def reset_networking(self, instance): + vm = self._get_vm_opaque_ref(instance) + self.write_to_xenstore(vm, "resetnetwork", "") + + def write_network_config_to_xenstore(self, instance): + vm = self._get_vm_opaque_ref(instance) + self.write_to_param_xenstore(vm, mapping) + def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records from 'path' downward. 
Returns a dict with the sub-paths as keys, -- cgit From 9b993d50835c79d23dca422335de362ebaf7f4fa Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 15:47:08 -0600 Subject: added plugin call for resetnetworking --- nova/virt/xenapi/vmops.py | 3 ++- plugins/xenserver/xenapi/etc/xapi.d/plugins/agent | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 882b9d9d6..7f9e78df5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -358,7 +358,8 @@ class VMOps(object): def reset_networking(self, instance): vm = self._get_vm_opaque_ref(instance) - self.write_to_xenstore(vm, "resetnetwork", "") + args = {'id': str(uuid.uuid4())} + resp = self._make_agent_call('resetnetwork', vm, '', args) def write_network_config_to_xenstore(self, instance): vm = self._get_vm_opaque_ref(instance) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent index 12c3a19c8..5c5ec7c45 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -91,6 +91,18 @@ def password(self, arg_dict): return resp +@jsonify +def resetnetwork(self, arg_dict): + """ + writes a resquest to xenstore that tells the agent to reset the networking + + """ + arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''}) + request_id = arg_dict['id'] + arg_dict['path'] = "data/host/%s" % request_id + xenstore.write_record(self, arg_dict) + + def _wait_for_agent(self, request_id, arg_dict): """Periodically checks xenstore for a response from the agent. 
The request is always written to 'data/host/{id}', and -- cgit From 8d1798008fcec536f1117a275b168ca449f1dfbf Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 16:19:07 -0600 Subject: syntax error --- nova/db/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/api.py b/nova/db/api.py index 7b37bce2f..f22cd5615 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -503,7 +503,7 @@ def network_get(context, network_id): def network_get_all(context): """Get all networks""" - returm IMPL.network_get_all(context) + return IMPL.network_get_all(context) # pylint: disable-msg=C0103 -- cgit From f77043d44aa640e1811a3fe236fc8fd5dfecf990 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 16:27:09 -0600 Subject: syntax --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7f9e78df5..1045d5d98 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -104,7 +104,7 @@ class VMOps(object): 'dns': network['dns'], 'ips': [{'netmask': network['netmask'], 'enabled': '1', - 'ip': 192.168.3.3}]} # <===== CHANGE!!!! + 'ip': '192.168.3.3'}]} # <===== CHANGE!!!! 
self.write_network_config_to_xenstore(vm_ref, mapping) bridge = network['bridge'] -- cgit From f38196b0eb7a11501f9b0ffa9409c05510798761 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 18:08:01 -0600 Subject: added default label to nova-manage and create_networks --- bin/nova-manage | 5 +++-- nova/network/manager.py | 8 +++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index d0901ddfc..38d36ab0f 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -442,7 +442,7 @@ class NetworkCommands(object): def create(self, fixed_range=None, num_networks=None, network_size=None, vlan_start=None, vpn_start=None, - fixed_range_v6=None): + fixed_range_v6=None, label='public'): """Creates fixed ips for host by range arguments: [fixed_range=FLAG], [num_networks=FLAG], [network_size=FLAG], [vlan_start=FLAG], @@ -463,7 +463,8 @@ class NetworkCommands(object): net_manager.create_networks(context.get_admin_context(), fixed_range, int(num_networks), int(network_size), int(vlan_start), - int(vpn_start), fixed_range_v6) + int(vpn_start), fixed_range_v6, + label) class ServiceCommands(object): diff --git a/nova/network/manager.py b/nova/network/manager.py index 61de8055a..a377c40c6 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -325,11 +325,12 @@ class FlatManager(NetworkManager): pass def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, *args, **kwargs): + cidr_v6, label, *args, **kwargs): """Create networks based on parameters.""" fixed_net = IPy.IP(cidr) fixed_net_v6 = IPy.IP(cidr_v6) significant_bits_v6 = 64 + count = 1 for index in range(num_networks): start = index * network_size significant_bits = 32 - int(math.log(network_size, 2)) @@ -342,6 +343,11 @@ class FlatManager(NetworkManager): net['gateway'] = str(project_net[1]) net['broadcast'] = str(project_net.broadcast()) net['dhcp_start'] = str(project_net[2]) + if num_networks > 1: + net['label'] = "%s_%d" % 
(label, count) + else: + net['label'] = label + count += 1 if(FLAGS.use_ipv6): cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6) -- cgit From a9f9a0fcb7443b93db3f4de8f68218f20f0cc1a9 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 18:36:18 -0600 Subject: really added migration for networks label --- .../sqlalchemy/migrate_repo/versions/003_cactus.py | 47 ++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py new file mode 100644 index 000000000..13b4766d8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -0,0 +1,47 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# +# New Tables +# + + +# +# Tables to alter +# + +networks_label = Column( + 'label', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + networks.create_column(networks_label) -- cgit From d4a643976adbe49ec52db53694481e9ba687cddf Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 18:40:04 -0600 Subject: fixed the migration --- nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py index 13b4766d8..ddfe114cb 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -25,6 +25,11 @@ from nova import log as logging meta = MetaData() +networks = Table('networks', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + # # New Tables # -- cgit From 7ef1c34c2251eb32ef2effa58ea7ee85f46112f7 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 18:44:00 -0600 Subject: moved argument for label --- bin/nova-manage | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 38d36ab0f..73832b0eb 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -441,8 +441,8 @@ class NetworkCommands(object): """Class for managing networks.""" def create(self, fixed_range=None, num_networks=None, - network_size=None, vlan_start=None, vpn_start=None, - fixed_range_v6=None, label='public'): + network_size=None, label='public', vlan_start=None, + vpn_start=None, fixed_range_v6=None): """Creates fixed ips for host by range arguments: [fixed_range=FLAG], [num_networks=FLAG], [network_size=FLAG], [vlan_start=FLAG], -- cgit From 00b029f60baca843487b3cfd89940ed65e85389a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 20 Jan 2011 18:51:46 -0600 Subject: undid moving argument --- bin/nova-manage | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage 
index 73832b0eb..9603c6a49 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -441,8 +441,8 @@ class NetworkCommands(object): """Class for managing networks.""" def create(self, fixed_range=None, num_networks=None, - network_size=None, label='public', vlan_start=None, - vpn_start=None, fixed_range_v6=None): + network_size=None, vlan_start=None, + vpn_start=None, fixed_range_v6=None, label='public'): """Creates fixed ips for host by range arguments: [fixed_range=FLAG], [num_networks=FLAG], [network_size=FLAG], [vlan_start=FLAG], -- cgit From e6b7fa7ae31e90f2d7322445da3843281fff9a70 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 22 Jan 2011 16:20:36 -0800 Subject: fixes and refactoring of smoketests --- smoketests/base.py | 43 +++++++++++--- smoketests/flags.py | 2 +- smoketests/user_smoketests.py | 133 +++++++++++++----------------------------- 3 files changed, 77 insertions(+), 101 deletions(-) diff --git a/smoketests/base.py b/smoketests/base.py index 89ee92840..afc618074 100644 --- a/smoketests/base.py +++ b/smoketests/base.py @@ -22,6 +22,7 @@ import httplib import os import paramiko import sys +import time import unittest from boto.ec2.regioninfo import RegionInfo @@ -31,7 +32,6 @@ FLAGS = flags.FLAGS boto_v6 = None - class SmokeTestCase(unittest.TestCase): def connect_ssh(self, ip, key_name): # TODO(devcamcar): set a more reasonable connection timeout time @@ -39,12 +39,10 @@ class SmokeTestCase(unittest.TestCase): client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.WarningPolicy()) client.connect(ip, username='root', pkey=key) - stdin, stdout, stderr = client.exec_command('uptime') - print 'uptime: ', stdout.read() return client - def can_ping(self, ip): - """ Attempt to ping the specified IP, and give up after 1 second. """ + def can_ping(self, ip, command="ping"): + """Attempt to ping the specified IP, and give up after 1 second.""" # NOTE(devcamcar): ping timeout flag is different in OSX. 
if sys.platform == 'darwin': @@ -52,10 +50,41 @@ class SmokeTestCase(unittest.TestCase): else: timeout_flag = 'w' - status, output = commands.getstatusoutput('ping -c1 -%s1 %s' % - (timeout_flag, ip)) + status, output = commands.getstatusoutput('%s -c1 -%s1 %s' % + (command, timeout_flag, ip)) return status == 0 + def wait_for_running(self, instance, tries=60, wait=1): + """Wait for instance to be running""" + for x in xrange(tries): + instance.update() + if instance.state.startswith('running'): + return True + time.sleep(wait) + else: + return False + + def wait_for_ping(self, ip, command="ping", tries=120): + """Wait for ip to be pingable""" + for x in xrange(tries): + if self.can_ping(ip, command): + return True + else: + return False + + def wait_for_ssh(self, ip, key_name, tries=30, wait=5): + """Wait for ip to be sshable""" + for x in xrange(tries): + try: + conn = self.connect_ssh(ip, key_name) + conn.close() + except Exception, e: + time.sleep(wait) + else: + return True + else: + return False + def connection_for_env(self, **kwargs): """ Returns a boto ec2 connection for the current environment. 
diff --git a/smoketests/flags.py b/smoketests/flags.py index dc756347b..5f3c8505e 100644 --- a/smoketests/flags.py +++ b/smoketests/flags.py @@ -36,4 +36,4 @@ DEFINE_bool = DEFINE_bool DEFINE_string('region', 'nova', 'Region to use') DEFINE_string('test_image', 'ami-tty', 'Image to use for launch tests') -DEFINE_bool('use_ipv6', True, 'use the ipv6 or not') +DEFINE_bool('use_ipv6', False, 'use the ipv6 or not') diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py index e5bc98ede..26f6344f7 100644 --- a/smoketests/user_smoketests.py +++ b/smoketests/user_smoketests.py @@ -19,7 +19,6 @@ import commands import os import random -import socket import sys import time import unittest @@ -91,7 +90,6 @@ class ImageTests(UserSmokeTestCase): break time.sleep(1) else: - print image.state self.assert_(False) # wasn't available within 10 seconds self.assert_(image.type == 'machine') @@ -143,70 +141,36 @@ class InstanceTests(UserSmokeTestCase): key_name=TEST_KEY, instance_type='m1.tiny') self.assertEqual(len(reservation.instances), 1) - self.data['instance_id'] = reservation.instances[0].id + self.data['instance'] = reservation.instances[0] def test_003_instance_runs_within_60_seconds(self): - reservations = self.conn.get_all_instances([self.data['instance_id']]) - instance = reservations[0].instances[0] + instance = self.data['instance'] # allow 60 seconds to exit pending with IP - for x in xrange(60): - instance.update() - if instance.state == u'running': - break - time.sleep(1) - else: + if not self.wait_for_running(self.data['instance']): self.fail('instance failed to start') - ip = reservations[0].instances[0].private_dns_name + self.data['instance'].update() + ip = self.data['instance'].private_dns_name self.failIf(ip == '0.0.0.0') - self.data['private_ip'] = ip if FLAGS.use_ipv6: - ipv6 = reservations[0].instances[0].dns_name_v6 + ipv6 = self.data['instance'].dns_name_v6 self.failIf(ipv6 is None) - self.data['ip_v6'] = ipv6 def 
test_004_can_ping_private_ip(self): - for x in xrange(120): - # ping waits for 1 second - status, output = commands.getstatusoutput( - 'ping -c1 %s' % self.data['private_ip']) - if status == 0: - break - else: + if not self.wait_for_ping(self.data['instance'].private_dns_name): self.fail('could not ping instance') if FLAGS.use_ipv6: - for x in xrange(120): - # ping waits for 1 second - status, output = commands.getstatusoutput( - 'ping6 -c1 %s' % self.data['ip_v6']) - if status == 0: - break - else: - self.fail('could not ping instance') + if not self.wait_for_ping(self.data['instance'].ip_v6, "ping6"): + self.fail('could not ping instance v6') def test_005_can_ssh_to_private_ip(self): - for x in xrange(30): - try: - conn = self.connect_ssh(self.data['private_ip'], TEST_KEY) - conn.close() - except Exception, e: - time.sleep(5) - else: - break - else: + if not self.wait_for_ssh(self.data['instance'].private_dns_name, + TEST_KEY): self.fail('could not ssh to instance') if FLAGS.use_ipv6: - for x in xrange(30): - try: - conn = self.connect_ssh( - self.data['ip_v6'], TEST_KEY) - conn.close() - except Exception: - time.sleep(1) - else: - break - else: + if not self.wait_for_ssh(self.data['instance'].ip_v6, + TEST_KEY): self.fail('could not ssh to instance v6') def test_006_can_allocate_elastic_ip(self): @@ -215,21 +179,13 @@ class InstanceTests(UserSmokeTestCase): self.data['public_ip'] = result.public_ip def test_007_can_associate_ip_with_instance(self): - result = self.conn.associate_address(self.data['instance_id'], + result = self.conn.associate_address(self.data['instance'].id, self.data['public_ip']) self.assertTrue(result) def test_008_can_ssh_with_public_ip(self): - for x in xrange(30): - try: - conn = self.connect_ssh(self.data['public_ip'], TEST_KEY) - conn.close() - except Exception: - time.sleep(1) - else: - break - else: - self.fail('could not ssh to instance') + if not self.wait_for_ssh(self.data['public_ip'], TEST_KEY): + self.fail('could not ssh to 
public ip') def test_009_can_disassociate_ip_from_instance(self): result = self.conn.disassociate_address(self.data['public_ip']) @@ -241,8 +197,7 @@ class InstanceTests(UserSmokeTestCase): def test_999_tearDown(self): self.delete_key_pair(self.conn, TEST_KEY) - if self.data.has_key('instance_id'): - self.conn.terminate_instances([self.data['instance_id']]) + self.conn.terminate_instances([self.data['instance'].id]) class VolumeTests(UserSmokeTestCase): @@ -255,24 +210,14 @@ class VolumeTests(UserSmokeTestCase): reservation = self.conn.run_instances(FLAGS.test_image, instance_type='m1.tiny', key_name=TEST_KEY) - instance = reservation.instances[0] - for x in xrange(120): - time.sleep(1) - instance.update() - if self.can_ping(instance.private_dns_name): - break - else: - self.fail('unable to start instance') - self.data['instance'] = instance - for x in xrange(30): - try: - conn = self.connect_ssh(instance.private_dns_name, TEST_KEY) - conn.close() - except Exception: - time.sleep(5) - else: - break - else: + self.data['instance'] = reservation.instances[0] + if not self.wait_for_running(self.data['instance']): + self.fail('instance failed to start') + self.data['instance'].update() + if not self.wait_for_ping(self.data['instance'].private_dns_name): + self.fail('could not ping instance') + if not self.wait_for_ssh(self.data['instance'].private_dns_name, + TEST_KEY): self.fail('could not ssh to instance') def test_001_can_create_volume(self): @@ -280,32 +225,34 @@ class VolumeTests(UserSmokeTestCase): self.assertEqual(volume.size, 1) self.data['volume'] = volume # Give network time to find volume. 
- time.sleep(5) + time.sleep(10) def test_002_can_attach_volume(self): volume = self.data['volume'] - for x in xrange(30): + for x in xrange(10): + volume.update() if volume.status.startswith('available'): break time.sleep(1) - volume.update() else: self.fail('cannot attach volume with state %s' % volume.status) volume.attach(self.data['instance'].id, self.device) - # Volumes seems to report "available" too soon. + # wait for x in xrange(10): + volume.update() if volume.status.startswith('in-use'): break - time.sleep(5) - volume.update() + time.sleep(1) + else: + self.fail('volume never got to in use') self.assertTrue(volume.status.startswith('in-use')) # Give instance time to recognize volume. - time.sleep(5) + time.sleep(10) def test_003_can_mount_volume(self): ip = self.data['instance'].private_dns_name @@ -316,12 +263,12 @@ class VolumeTests(UserSmokeTestCase): 'grep %s /proc/partitions | ' '`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`' % self.device.rpartition('/')[2]) - commands = [] - commands.append('mkdir -p /mnt/vol') - commands.append('/sbin/mke2fs %s' % self.device) - commands.append('mount %s /mnt/vol' % self.device) - commands.append('echo success') - stdin, stdout, stderr = conn.exec_command(' && '.join(commands)) + exec_list = [] + exec_list.append('mkdir -p /mnt/vol') + exec_list.append('/sbin/mke2fs %s' % self.device) + exec_list.append('mount %s /mnt/vol' % self.device) + exec_list.append('echo success') + stdin, stdout, stderr = conn.exec_command(' && '.join(exec_list)) out = stdout.read() conn.close() if not out.strip().endswith('success'): -- cgit From 0c7893e4119bcccdfdfdcdef0931fcc8802688e8 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 26 Jan 2011 14:59:17 -0600 Subject: added mapping parameter to write_network_config_to_xenstore --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c7310987b..68fa1ecd6 100644 --- 
a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -391,7 +391,7 @@ class VMOps(object): args = {'id': str(uuid.uuid4())} resp = self._make_agent_call('resetnetwork', vm, '', args) - def write_network_config_to_xenstore(self, instance): + def write_network_config_to_xenstore(self, instance, mapping): vm = self._get_vm_opaque_ref(instance) self.write_to_param_xenstore(vm, mapping) -- cgit From fe3836c5ce16f7c4921eaee746c108d7ae7b4d1a Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Fri, 28 Jan 2011 05:21:04 +0000 Subject: Launchpad automatic translations update. --- locale/ast.po | 2130 +++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/da.po | 2130 +++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/es.po | 2177 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/it.po | 2141 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/ja.po | 2143 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/pt_BR.po | 2148 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/ru.po | 2136 +++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/uk.po | 2130 +++++++++++++++++++++++++++++++++++++++++++++++++++++ locale/zh_CN.po | 2135 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 19270 insertions(+) create mode 100644 locale/ast.po create mode 100644 locale/da.po create mode 100644 locale/es.po create mode 100644 locale/it.po create mode 100644 locale/ja.po create mode 100644 locale/pt_BR.po create mode 100644 locale/ru.po create mode 100644 locale/uk.po create mode 100644 locale/zh_CN.po diff --git a/locale/ast.po b/locale/ast.po new file mode 100644 index 000000000..c887bbc91 --- /dev/null +++ b/locale/ast.po @@ -0,0 +1,2130 @@ +# Asturian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-12 19:50+0000\n" +"Last-Translator: Xuacu Saturio \n" +"Language-Team: Asturian \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nome del ficheru de l'autoridá de certificáu raíz" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nome del ficheru de clave privada" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Nome del ficheru de llista de refugu de certificáu raíz" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." 
+msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format 
+msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group 
\"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: 
nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, 
python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, 
python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid 
"check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, 
python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: 
nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: 
nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: 
nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, 
python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid 
"Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format 
+msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 
+#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, 
python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/locale/da.po b/locale/da.po new file mode 100644 index 000000000..524b27a64 --- /dev/null +++ b/locale/da.po @@ -0,0 +1,2130 @@ +# Danish translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-15 21:46+0000\n" +"Last-Translator: Soren Hansen \n" +"Language-Team: Danish \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Filnavn for privatnøgle" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." 
+msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format 
+msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group 
\"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: 
nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, 
python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, 
python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid 
"check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, 
python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: 
nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: 
nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: 
nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, 
python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid 
"Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format 
+msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 
+#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, 
python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "bind %s: slettet" diff --git a/locale/es.po b/locale/es.po new file mode 100644 index 000000000..a1cf5b7f6 --- /dev/null +++ b/locale/es.po @@ -0,0 +1,2177 @@ +# Spanish translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-18 14:56+0000\n" +"Last-Translator: Javier Turégano \n" +"Language-Team: Spanish \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nombre de fichero de la CA raíz" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nombre de fichero de la clave privada" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Nombre de fichero de la lista de certificados de revocación raíz" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Donde guardamos nuestras claves" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Dónde guardamos nuestra CA raíz" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "¿Deberíamos usar una CA para cada proyecto?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado de usuarios, %s para el proyecto, " +"usuario, marca de tiempo" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado del proyecto, %s para el proyecto, " +"marca de tiempo" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado para vpns, %s para el proyecto, marca " +"de tiempo" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "Sucedió un error inesperado mientras el comando se ejecutaba." + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Comando: %s\n" +"Código de salida: %s\n" +"Stdout: %s\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Excepción no controlada" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) públicar (clave: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "Publicando la ruta %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Declarando cola %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Declarando intercambio %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "Asociando %s a %s con clave %s" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Obteniendo desde %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"El servidor AMQP en %s:%d no se puede alcanzar. Se reintentará en %d " +"segundos." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Imposible conectar al servidor AMQP después de %d intentos. Apagando." 
+ +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Reconectado a la cola" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Fallo al obtener el mensaje de la cola" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "recibido %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "no hay método para el mensaje: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "No hay método para el mensaje: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "contenido desempaquetado: %s" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Haciendo una llamada asíncrona..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID es %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "respuesta %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "mensaje %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Inciando nodo %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "Se detuvo un servicio sin entrada en la base de datos" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "Recuperada la conexión al servidor de modelos." + +#: nova/service.py:208 +msgid "model server went away" +msgstr "el servidor de modelos se ha ido" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. 
Trying again in %d seconds." +msgstr "" +"El almacen de datos %s es inalcanzable. Reintentandolo en %d segundos." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Sirviendo %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de opciones:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "el pidfile %s no existe. ¿No estará el demonio parado?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Comenzando %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Excepción interna: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "La clase %s no ha podido ser encontrada." + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "Obteniendo %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Ejecutando cmd (subprocesos): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "El resultado fue %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "Depuración de la devolución de llamada: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "Ejecutando %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "No puedo obtener IP, usando 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "backend inválido: %s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "Demasiados intentos de autenticacion fallidos." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" +"La clave de acceso %s ha tenido %d fallos de autenticación y se bloqueará " +"por %d minutos." + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Fallo de autenticación: %s" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "Solicitud de autenticación para %s:%s" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "acción: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "arg: %s \t \t val: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "Solicitud no autorizada para controller=%s y action=%s" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "No encontrado: %s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "Sucedió un ApiError: %s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "Sucedió un error inexperado: %s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Ha sucedido un error desconocido. Por favor repite el intento de nuevo." 
+ +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Creando nuevo usuario: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Eliminando usuario: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Añadiendo rol %s al usuario %s para el proyecto %s" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "Añadiendo rol global %s al usuario %s" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Eliminando rol %s del usuario %s para el proyecto %s" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "Eliminando rol global %s del usuario %s" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "la operación debe ser añadir o eliminar" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "Obteniendo x509 para el usuario: %s en el proyecto %s" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Creación del proyecto %s gestionada por %s" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Borrar proyecto: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Añadiendo usuario %s al proyecto %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Eliminando usuario %s del proyecto %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "Solicitud de API no soportada: controller=%s,action=%s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid 
"Generating root CA: %s" +msgstr "Generando CA raiz: %s" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Creando par de claves %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "Borrar para de claves %s" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s no es un ipProtocol valido" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "Rango de puerto inválido" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revocar ingreso al grupo de seguridad %s" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "No hay regla para los parámetros especificados." + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Autorizar ingreso al grupo de seguridad %s" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Esta regla ya existe en el grupo %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "Crear Grupo de Seguridad %s" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "el grupo %s ya existe" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Borrar grupo de seguridad %s" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obtener salida de la consola para la instancia %s" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Crear volumen de %s GB" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "Asociar volumen %s a la instancia %s en %s" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr 
"Desasociar volumen %s" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "Asignar dirección" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "Liberar dirección %s" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "Asociar dirección %s a la instancia %s" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "Desasociar dirección %s" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "Se va a iniciar la finalización de las instancias" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "Reiniciar instancia %r" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "Des-registrando la imagen %s" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "Registrada imagen %s con id %s" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "atributo no soportado: %s" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "id no valido: %s" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "usuario o grupo no especificado" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "sólo el grupo \"all\" está soportado" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "operation_type debe ser añadir o eliminar" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "Actualizando imagen %s públicamente" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado error: %s" 
+ +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "Incluyendo operaciones de administración in API." + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "El usuario %s ya existe" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "El proyecto no puede ser creado porque el administrador %s no existe" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "El proyecto no puede ser creado porque el proyecto %s ya existe" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" +"El proyecto no puede ser modificado porque el administrador %s no existe" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "No se ha encontrado el usuario \"%s\"" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" 
not found" +msgstr "No se ha encontrado el proyecto \"%s\"" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Intento de instanciar sigleton" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "El objeto LDAP para %s no existe" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "El proyecto no puede ser creado porque el usuario %s no existe" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "El usuario %s ya es miembro de el grupo %s" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Se ha intentado eliminar el último miembro de un grupo. Eliminando el grupo " +"%s en su lugar." + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "El grupo con dn %s no existe" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "Buscando usuario: %r" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Fallo de autorización para la clave de acceso %s" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "No se ha encontrado usuario para la clave de acceso %s" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Utilizando nombre de proyecto = nombre de usuario (%s)" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" +"fallo de autorización: no existe proyecto con el nombre %s (usuario=%s)" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "No se ha podido encontrar un proyecto con nombre %s" + +#: 
nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" +"Fallo de autorización: el usuario %s no es administrador y no es miembro del " +"proyecto %s" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "El usuario %s no es miembro del proyecto %s" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Firma invalida para el usuario %s" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "Las firmas no concuerdan" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "Debes especificar un proyecto" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "El rol %s no se ha podido encontrar" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "El rol %s es únicamente global" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Añadiendo rol %s al usuario %s en el proyecto %s" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Eliminando rol %s al usuario %s en el proyecto %s" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Proyecto %s creado con administrador %s" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "modificando proyecto %s" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Eliminar usuario %s del proyecto %s" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Eliminando proyecto %s" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Creado usuario %s (administrador: %r)" + +#: 
nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "Eliminando usuario %s" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "Cambio de clave de acceso para el usuario %s" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Cambio de clave secreta para el usuario %s" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "El estado del administrador se ha fijado a %r para el usuario %s" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "No hay datos vpn para el proyecto %s" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "Red a insertar en la configuración de openvpn" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "Mascara de red a insertar en la configuración de openvpn" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "Lanzando VPN para %s" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "La instancia %d no se ha encontrado en get_network_topic" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "La instancia %d no tiene host" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "Quota superada por %s, intentando lanzar %s instancias" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" +"Quota de instancias superada. Sólo puedes ejecutar %s instancias más de este " +"tipo." 
+ +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "Creando una instancia raw" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "Vamos a ejecutar %s instancias..." + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "Llamando al planificador para %s/%s instancia %s" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "Se va a probar y terminar %s" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "La instancia %d no se ha encontrado durante la finalización" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "La instancia %d ya está siendo finalizada" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" +"El dispositivo especificado no es válido: %s. Ejemplo de dispositivo: " +"/dev/vdb" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "¡El volumen no está unido a nada!" 
+ +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" +"El tamaño de la partición de entrada no es divisible de forma uniforme por " +"el tamaño del sector: %d / %d" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" +"Los bytes del almacenamiento local no son divisibles de forma uniforme por " +"el tamaño del sector: %d / %d" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "No se puede unir la imagen con el loopback: %s" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "Fallo al cargar la partición: %s" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Fallo al montar el sistema de ficheros: %s" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "Tipo de instancia desconocido: %s" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: ejecutando: |%s|" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: no ejecutando |%s|" + +#: nova/compute/manager.py:157 +msgid "Instance has already 
been created" +msgstr "La instancia ha sido creada previamente" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "instancia %s: iniciando..." + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "Instancia %s: no se pudo iniciar" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "Finalizando la instancia %s" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "Desasociando la dirección %s" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "Desasociando la dirección %s" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "intentando finalizar una instancia que ya había sido finalizada: %s" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "Reiniciando instancia %s" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" +"intentando reiniciar una instancia que no está en ejecución: %s (estado: %s " +"esperado: %s)" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instancia %s: creando snapshot" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" +"intentando crear un snapshot de una instancia que no está en ejecución: %s " +"(estado: %s esperado: %s)" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "instancia %s: rescatando" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "instancia %s: pausando" + +#: 
nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "instancia %s: continuando tras pausa" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instancia %s: obteniendo los diagnósticos" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "instancia %s: suspendiendo" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "instancia %s: continuando" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "instancia %s: bloqueando" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "instancia %s: desbloqueando" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instancia %s: pasando a estado bloqueado" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "instancia %s: asociando volumen %s a %s" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "instancia %s: asociación fallida %s, eliminando" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "Desvinculando volumen %s del punto de montaje %s en la instancia %s" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Desvinculando volumen de instancia desconocida %s" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "actualizando %s..." 
+ +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "error inesperado durante la actualización" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "No puedo obtener estadísticas del bloque para \"%s\" en \"%s\"" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "No puedo obtener estadísticas de la interfaz para \"%s\" en \"%s\"" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "excepción inexperada al obtener la conexión" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "Encontrada interfaz: %s" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "El uso de una petición de contexto vacía está en desuso" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "No hay servicio para el id %s" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "No hay servicio para %s, %s" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "No hay ip flotante para la dirección %s" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "No hay instancia con id %s" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "La instancia %s no se ha encontrado" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "no hay par de claves para el usuario %s, nombre %s" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "No hay red para el id %s" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid 
"No network for bridge %s" +msgstr "No hay red para el puente %s" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "No hay red para la instancia %s" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "El token %s no existe" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "No hay quota para el project:id %s" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "No hay volumen para el id %s" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "El volumen %s no se ha encontrado" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "No se ha encontrado dispositivo exportado para el volumen %s" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "No se ha encontrado id de destino para el volumen %s" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "No hay un grupo de seguridad con el id %s" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "No hay un grupo de seguridad con nombre %s para el proyecto: %s" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "No hay una regla para el grupo de seguridad con el id %s" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "No hay un usuario con el id %s" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "No hay un usuario para la clave de acceso %s" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "No hay proyecto con id %s" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP 
error %d from request for /images" +msgstr "Parallax ha devuelto un error HTTP %d a la petición para /images" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" +"Parallax ha devuelto un error HTTP %d para la petición para /images/detail" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "La imagen %s no ha podido ser encontrada" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "Quota excedida para %s, intentando asignar direcciones" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" +"La quota de direcciones ha sido excedida. No puedes asignar más direcciones" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Iniciando interfaz VLAN %s" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Iniciando interfaz puente para %s" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Excepción al recargar la configuración de dnsmasq: %s" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "El pid %d está pasado, relanzando dnsmasq" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "Al matar dnsmasq se lanzó %s" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "configurando la red del host" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "Liberando IP %s" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "IP %s asociada a una mac incorrecta %s vs %s" + +#: 
nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "Tipo de valor S3 %r desconocido" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "Petición autenticada" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "Listado de cubos solicitado" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "Lista de claves para el cubo %s" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "Intento no autorizado para acceder al cubo %s" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "Creando el cubo %s" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "Eliminando el cubo %s" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "Intento no autorizado de eliminar el cubo %s" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "Obteniendo objeto: %s / %s" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "Intento no autorizado de obtener el objeto %s en el cubo %s" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "Colocando objeto: %s 
/ %s" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "Intento no autorizado de subir el objeto %s al cubo %s" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "Eliminando objeto: %s / %s" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "No autorizado para subir imagen: directorio incorrecto %s" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "No autorizado para subir imagen: cubo %s no autorizado" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "Comenzando la subida de la imagen: %s" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "No autorizado para actualizar los atributos de la imagen %s" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "Cambiando los atributos de publicidad de la imagen %s %r" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "Actualizando los campos de usuario de la imagen %s" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "Intento no autorizado de borrar la imagen %s" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "Eliminada imagen: %s" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "No se han encontrado hosts" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "Debe de implementar un horario de reserva" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting 
to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "Todos los hosts tienen demasiados cores" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "Todos los hosts tienen demasiados gigabytes" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "Todos los hosts tienen demasiadas redes" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "No puedo probar las imágenes sin un entorno real virtual" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "Hay que vigilar la instancia %s hasta que este en ejecución..." + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "Ejecutando instancias: %s" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "Después de terminar las instancias: %s" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "Recibido %s" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "Destino %s asignado" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "Fallo al abrir conexión con el hypervisor" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "La instancia %s no ha sido encontrada" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "En el host inicial" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "Intento de crear una vm duplicada %s" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr 
"Comenzando VM %s " + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "VM %s iniciada " + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "Inicio de vm fallido: %s" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "Fallo al crear la VM %s" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "Creada VM %s..." + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "Se ha establecido la memoria para vm %s..." + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "Establecidas vcpus para vm %s..." + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" +"Creando disco para %s a través de la asignación del fichero de disco %s" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "Fallo al añadir unidad de disco a la VM %s" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "La nueva ruta para unidad de disco es %s" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "Fallo al añadir el fichero vhd a la VM %s" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "Discos creados para %s" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "Creando nic para %s " + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "Fallo al crear un puerto en el vswitch externo" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "Fallo creando puerto para %s" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "Creado puerto %s en el switch %s" + +#: nova/virt/hyperv.py:285 +#, 
python-format +msgid "Failed to add nic to VM %s" +msgstr "Fallo al añadir nic a la VM %s" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "Creando nic para %s " + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "Trabajo WMI falló: %s" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "Trabajo WMI ha tenido exito: %s, Transcurrido=%s " + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "Recibida solicitud para destruir vm %s" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "Fallo al destruir vm %s" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "Del: disco %s vm %s" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" +"Obtenida información para vm %s: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "se ha encontrado un nombre duplicado: %s" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "Cambio de estado de la vm con éxito de %s a %s" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "Fallo al cambiar el estado de la vm de %s a %s" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "Finalizada la obtención de %s -- coloado en %s" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Conectando a libvirt: %s" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "Conexión a libvirt rota" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting 
instance files %s" +msgstr "instancia %s: eliminando los ficheros de la instancia %s" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "No hay disco en %s" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" +"El snapshotting de instancias no está soportado en libvirt en este momento" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "instancia %s: reiniciada" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "_wait_for_reboot falló: %s" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "instancia %s: rescatada" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "_wait_for_rescue falló: %s" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "instancia %s: está ejecutándose" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "instancia %s: arrancada" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "insntancia %s: falló al arrancar" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "virsh dijo: %r" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "genial, es un dispositivo" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "datos: %r, fpath: %r" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "Contenidos del fichero %s: %r" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "instancia %s: Creando imagen" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "instancia %s: 
inyectando clave en la imagen %s" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "instancia %s: inyectando red en la imagen %s" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" +"instancia %s: ignorando el error al inyectar datos en la imagen %s (%s)" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "instancia %s: comenzando método toXML" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "instancia %s: finalizado método toXML" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" +"Debes especificar xenapi_connection_url, xenapi_connection_username " +"(opcional), y xenapi_connection_password para usar connection_type=xenapi" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "Tarea [%s] %s estado: éxito %s" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "Tarea [%s] %s estado: %s %s" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "Obtenida excepción %s" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "%s: _db_content => %s" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "Lanzando NotImplemented" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake no tiene una implementación para %s" + +#: nova/virt/xenapi/fake.py:283 +#, 
python-format +msgid "Calling %s %s" +msgstr "Llamando %s %s" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "Llanado al adquiridor %s" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake no tiene una implementación para %s o ha sido llamada con un " +"número incorrecto de argumentos" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Encontrada una red no única para el puente %s" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "No se ha encontrado red para el puente %s" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "Creada VM %s cómo %s" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "Creando VBD para VM %s, VDI %s... " + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "Creado VBD %s for VM %s, VDI %s." + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD no encontrado en la instancia %s" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Imposible desconectar VBD %s" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Imposible destruir VBD %s" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "Creando VIF para VM %s, red %s." + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "Creado VIF %s para VM %s, red %s." + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." 
+msgstr "Creando snapshot de la VM %s con la etiqueta '%s'..." + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." +msgstr "Creando snapshot %s de la VM %s" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "Solicitando a xapi la subida de %s cómo %s'" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "Solicitando a xapi obtener %s cómo %s" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Buscando vid %s para el kernel PV" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "PV Kernel en VDI:%d" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s está todavía disponible" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "VHD %s tiene cómo padre a %s" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-escaneando SR %s" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" +"El padre %s no concuerda con el padre original %s, esperando la unión..." 
+ +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "No se han encontrado VDI's para VM %s" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "Número no esperado de VDIs (%s) encontrados para VM %s" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "Intentado la creación del nombre no único %s" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "Iniciando VM %s..." + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "Iniciando VM %s creado %s." + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "Instancia %s: iniciada" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "Instancia no existente %s" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "Comenzando snapshot para la VM %s" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "Incapaz de realizar snapshot %s: %s" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "Finalizado el snapshot y la subida de la VM %s" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "suspendido: instancia no encontrada: %s" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "reanudar: instancia no encontrada %s" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "instancia no encontrada %s" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "Introduciendo %s..." + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." 
+msgstr "Introducido %s cómo %s." + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "Imposible crear el repositorio de almacenamiento" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "Olvidando SR %s... " + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "Ignorando excepción %s al obtener PBDs de %s" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "Ignorando excepción %s al desconectar PBD %s" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." +msgstr "Olvidando SR %s completado." + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "Ignorando excepción %s al olvidar SR %s" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "Incapaz de insertar VDI en SR %s" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "Imposible obtener copia del VDI %s en" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "Inposible insertar VDI para SR %s" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "Imposible obtener información del destino %s, %s" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "Punto de montaje no puede ser traducido: %s" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "Attach_volume: %s, %s, %s" + +#: 
nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "Inpoisble crear VDI en SR %s para la instancia %s" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "Imposible utilizar SR %s para la instancia %s" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Imposible adjuntar volumen a la instancia %s" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "Punto de montaje %s unido a la instancia %s" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "Detach_volume: %s, %s" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Imposible encontrar volumen %s" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Imposible desasociar volumen %s" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "Punto d emontaje %s desasociado de la instancia %s" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "Quota excedida para %s, intentando crear el volumen %sG" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "Quota de volumen superada. 
No puedes crear un volumen de tamaño %s" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "El estado del volumen debe estar disponible" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "El volumen ya está asociado previamente" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "El volumen ya ha sido desasociado previamente" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recuperandose de una ejecución fallida. Intenta el número %s" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "el grupo de volumenes %s no existe" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "Falso AOE: %s" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "Falso ISCSI: %s" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Exportando de nuevo los volumenes %s" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "volumen %s: creando" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "volumen %s: creando lv de tamaño %sG" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "volumen %s: exportando" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "volumen %s: creado satisfactoriamente" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "El volumen todavía está asociado" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "Volumen no local a este nodo" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "volumen %s: eliminando exportación" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: 
deleting" +msgstr "volumen %s: eliminando" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volumen %s: eliminado satisfactoriamente" diff --git a/locale/it.po b/locale/it.po new file mode 100644 index 000000000..f2f6a6b87 --- /dev/null +++ b/locale/it.po @@ -0,0 +1,2141 @@ +# Italian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-14 17:17+0000\n" +"Last-Translator: Armando Migliaccio \n" +"Language-Team: Italian \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nome del file root CA" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nome del file della chiave privata" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Dove si conservano le chiavi" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Dove si conserva root CA" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Si dovrebbe usare un CA per ogni progetto?" 
+ +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Soggetto per il certificato degli utenti, %s per progetto, utente, orario" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Soggetto per il certificato dei progetti, %s per progetto, orario" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "Soggetto per il certificato delle vpn, %s per progetto, orario" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Percorso dei flags: %s" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" +"Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Comando: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Eccezione non gestita" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) pubblica (chiave: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "Pubblicando sulla route %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Dichiarando la coda %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Dichiarando il centralino %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "Collegando %s a %s con la chiave %s" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"Il server AMQP su %s:%d non é raggiungibile. 
Riprovare in %d secondi." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Impossibile connettersi al server AMQP dopo %d tentativi. Terminando " +"l'applicazione." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Riconnesso alla coda" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Impossibile prelevare il messaggio dalla coda" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "Inizializzando il Consumer Adapter per %s" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "ricevuto %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "nessun metodo per il messaggio: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "nessun metodo per il messagggio: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Sollevando eccezione %s al chiamante" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "contesto decompresso: %s" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Facendo chiamata asincrona..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "risposta %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "argomento é %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "messaggio %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Avviando il nodo %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "Servizio terminato che non ha entry nel database" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "Il servizio é scomparso dal database, ricreo." 
+ +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "Connessione al model server ripristinata!" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "model server é scomparso" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "Datastore %s é irrangiungibile. Riprovare in %d seconds." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Servire %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "Insieme di FLAGS:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" +"Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Avvio di %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Eccezione interna: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "Classe %s non può essere trovata" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "Prelievo %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Esecuzione del comando (sottoprocesso): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "Il risultato é %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." 
+msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: 
nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 
+msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: 
nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, 
python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota 
exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: 
locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid 
"instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s 
not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: 
nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a 
fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM 
%s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + 
+#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: 
Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found 
non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/locale/ja.po b/locale/ja.po new file mode 100644 index 000000000..919625e9a --- /dev/null +++ b/locale/ja.po @@ -0,0 +1,2143 @@ +# Japanese translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-14 09:04+0000\n" +"Last-Translator: Koji Iida \n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "ルートCAのファイル名" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "プライベートキーのファイル名" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "ルート証明書失効リストのファイル名" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "キーを格納するパス" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "ルートCAを格納するパス" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "プロジェクトごとにCAを使用するか否かのフラグ" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "ユーザの証明書のサブジェクト、%s はプロジェクト、ユーザ、タイムスタンプ" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "プロジェクトの証明書のサブジェクト、%s はプロジェクト、およびタイムスタンプ" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "vpnの証明書のサブジェクト、%sはプロジェクト、およびタイムスタンプ" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Flags のパス: %s" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"コマンド: %s\n" +"終了コード: %s\n" +"標準出力: %r\n" +"標準エラー出力: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "キャッチされなかった例外" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) パブリッシュ (key: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "ルート %s へパブリッシュ" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "queue %s の宣言" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "exchange %s の宣言" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "%s を %s にキー %s でバインドします。" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "%s から %s を取得" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "AMQPサーバ %s:%d に接続できません。 %d 秒後に再度試みます。" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." 
+msgstr "AMQPサーバーに %d 回接続を試みましたが、接続できませんでした。シャットダウンします。" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "キューに再接続しました。" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "キューからメッセージの取得に失敗しました。" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "%sのアダプターコンシューマー(Adapter Consumer)を初期化しています。" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "受信: %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "呼び出し元に 例外 %s を返却します。" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "context %s をアンパックしました。" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "非同期呼び出しを実行します…" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_IDは %s です。" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "応答 %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "topic は %s です。" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "メッセージ %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "ノード %s を開始します。" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "データベースにエントリの存在しないサービスを終了します。" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "モデルサーバへの接続を復旧しました。" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "モデルサーバが消滅しました。" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." 
+msgstr "データストア %s に接続できません。 %d 秒後に再接続します。" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "%s サービスの開始" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "FLAGSの一覧:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "%s を開始します。" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "内側で発生した例外: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "クラス %s が見つかりません。" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "ファイルをフェッチ: %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "コマンド実行(subprocess): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "コマンド実行結果: %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "コールバック中のデバッグ: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "コマンド実行: %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "IPを取得できません。127.0.0.1 を %s として使います。" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "不正なバックエンドです: %s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "バックエンドは %s です。" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "認証失敗の回数が多すぎます。" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "アクセスキー %s は %d 回認証に失敗したため、%d 分間ロックされます。" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "%s の認証に失敗しました。" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "リクエストを認証しました: %s:%s" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "アクション(action): %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "引数(arg): %s\t値(val): %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "許可されていないリクエスト: controller=%s, action %sです。" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "NotFound 発生: %s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "APIエラー発生: %s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "予期しないエラー発生: %s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "未知のエラーが発生しました。再度リクエストを実行してください。" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Creating new user: 新しいユーザ %s を作成します。" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Deleting user: ユーザ %s を削除します。" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Adding role: ロール %s をユーザ %s、プロジェクト %s に追加します。" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "Adding sitewide role: サイトワイドのロール %s をユーザ %s に追加します。" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Removing role: ロール %s をユーザ %s プロジェクト %s から削除します。" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "Removing sitewide role: サイトワイドのロール %s をユーザ %s から削除します。" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "operation は add または remove の何れかである必要があります。" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "Getting X509: x509の取得: ユーザ %s, プロジェクト %s" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Create project: プロジェクト %s (%s により管理される)を作成します。" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Delete project: プロジェクト %s を削除しました。" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Adding user: ユーザ %s をプロジェクト %s に追加します。" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Removing user: ユーザ %s をプロジェクト %s から削除します。" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "サポートされていないAPIリクエストです。 
controller = %s,action = %s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "ルートCA %s を生成しています。" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Create key pair: キーペア %s を作成します。" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "Delete key pair: キーペア %s を削除します。" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s は適切なipProtocolではありません。" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "ポートの範囲が不正です。" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revoke security group ingress: セキュリティグループ許可 %s の取消" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "指定されたパラメータに該当するルールがありません。" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Authorize security group ingress: セキュリティグループ許可 %s" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "指定されたルールは既にグループ %s に存在しています。" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "Create Security Group: セキュリティグループ %s を作成します。" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "グループ %s は既に存在しています。" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Delete security group: セキュリティグループ %s を削除します。" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "Attach volume: ボリューム%s 
をインスタンス %s にデバイス %s でアタッチします。" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "Detach volume: ボリューム %s をデタッチします" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "Allocate address: アドレスを割り当てます。" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "Release address: アドレス %s を開放します。" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "Associate address: アドレス %s をインスタンス %s に関連付けます。" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "Disassociate address: アドレス %s の関連付けを解除します。" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "インスタンス終了処理を開始します。" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "Reboot instance: インスタンス %r を再起動します。" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "De-registering image: イメージ %s を登録解除します。" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "Registered image: イメージ %s をid %s で登録します。" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "アトリビュート %s はサポートされていません。" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "id %s は不正です。" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "ユーザまたはグループが指定されていません。" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "グループ \"all\" のみサポートされています。" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "operation_type は add または remove の何れかである必要があります。" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "イメージ %s の公開設定を更新します。" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "ip %s 
に対するメタデータの取得に失敗しました。" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "エラー %s をキャッチしました。" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "管理用オペレーション(admin operation)をAPIに登録します。" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "例外: Compute.api::lock %s" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "例外: Compute.api::unlock %s" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "例外: Compute.api::get_lock %s" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "例外: Compute.api::pause %s" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "例外: Compute.api::unpause %s" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "例外: compute.api::suspend %s" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "例外: compute.api::resume %s" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "ユーザー %s は既に存在しています。" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "マネージャ %s が存在しないためプロジェクトを作成できません。" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "プロジェクト %s が既に存在するためプロジェクトを作成できません。" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "マネージャ %s が存在しないためプロジェクトを更新できません。" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "ユーザ \"%s\" が見つかりません。" + +#: nova/auth/dbdriver.py:248 +#, python-format 
+msgid "Project \"%s\" not found" +msgstr "プロジェクト \"%s\" が見つかりません。" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "シングルトンをインスタンス化しようとしました。" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "LDAPオブジェクト %s が存在しません。" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "ユーザ %s が存在しないためプロジェクトを作成できません。" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "ユーザ %s は既にグループ %s のメンバーです。" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "グループの最後のメンバーを削除しようとしました。代わりにグループ %s を削除してください。" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "dnが %s のグループは存在しません。" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "ユーザ %r を検索します。" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Failed authorization: アクセスキー %s の認証に失敗しました。" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "アクセスキー %s に対するユーザが見つかりませんでした。" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "ユーザ名 (%s) をプロジェクト名として使用します。" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "Failed authorization: 認証に失敗しました。プロジェクト名 %s (ユーザ = %s) は存在しません。" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "プロジェクト %s は見つかりませんでした。" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" +"Failed authorization: 認証に失敗しました: ユーザ %s は管理者ではなくかつプロジェクト %s のメンバーではありません。" + +#: 
nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "ユーザ %s はプロジェクト %s のメンバーではありません。" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Invalid signature: ユーザ %s の署名が不正です。" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "署名が一致しません。" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "プロジェクトを指定してください。" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "ロール %s が見つかりません。" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "ロール %s はグローバルでのみ使用可能です。" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Adding role: ロール %s をユーザ %s (プロジェクト %s の) に追加します。" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Removing role: ロール %s をユーザ %s (プロジェクト %s の)から削除します。" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Created project: プロジェクト %s (マネージャ %s)を作成します。" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "modifying project: プロジェクト %s を更新します。" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Remove user: ユーザ %s をプロジェクト %s から削除します。" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Deleting project: プロジェクト %s を削除します。" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Created user: ユーザ %s (admin: %r) を作成しました。" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "Deleting user: ユーザ %s を削除します。" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "Access Key change: ユーザ %s のアクセスキーを更新します。" + +#: 
nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "Admin status set: 管理者ステータス %r をユーザ %s に設定します。" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "プロジェクト %s に関するvpnデータがありません。" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "cloudpipeインスタンス起動時に実行するスクリプトのテンプレート" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "openvpnの設定に入れるネットワークの値" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "openvpnの設定に入れるネットマスクの値" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "%s 用のVPNを起動します。" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "get_network_topicにおいてインスタンス %d が見つかりませんでした。" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "インスタンス %d にホストが登録されていません。" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "%s のクオータ上限を超えました。%s インスタンスを実行しようとしました。" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "raw instanceを生成します。" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." 
+msgstr "%s 個のインスタンスの起動を始めます…" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "スケジューラに対して %s/%s のインスタンス %s を送信します。" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "%s を終了します。" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "インスタンス %d が終了処理において見つかりませんでした。" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "インスタンス %d は既に終了済みです。" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "デバイスの指定 %s が不正です: デバイス指定の例: /dev/vdb" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "ボリュームはどこにもアタッチされていません。" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "インプットパーティションサイズがセクターサイズで割り切れません。 %d / %d" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "ローカルストレージのバイト数がセクターサイズで割り切れません: %d / %d" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "イメージをループバック %s にアタッチできません。" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "パーティション %s のロードに失敗しました。" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "ファイルシステム %s のマウントに失敗しました。" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "%s は未知のインスタンスタイプです。" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" + +#: 
nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executing: |%s|" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executing |%s|" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "インスタンスは既に生成されています。" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "インスタンス %s を開始します。" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "インスタンス %s の起動に失敗しました。" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "Terminating instance: インスタンス %s を終了します。" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "アドレス %s の関連付けを解除(disassociate)しています。" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "アドレス %s の割当を解除(deallocate)します。" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "既に消去済みのインスタンス%sを消去しようとしました。" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "Rebooting instance: インスタンス %s を再起動します。" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "実行していないインスタンスの再起動を試みます。%s (状態: %s 期待する状態: %s)" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "snapshotting: インスタンス %s のスナップショットを取得します。" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" 
+"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "実行していないインスタンスのスナップショット取得を試みます。%s (状態: %s 期待する状態: %s)" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "Rescuing: インスタンス %s をレスキューします。" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "Unrescuing: インスタンス %s をアンレスキューします。" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "pausing: インスタンス %s を一時停止します。" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "unpausing: インスタンス %s の一時停止を解除します。" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "retrieving diagnostics: インスタンス %s の診断情報を取得します。" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "suspending: インスタンス %s をサスペンドします。" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "resuming: インスタンス %s をレジュームします。" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "locking: インスタンス %s をロックします。" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "unlocking: インスタンス %s のロックを解除します。" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "getting locked state: インスタンス %s のロックを取得しました。" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "attaching volume: インスタンス %s についてボリューム %s を %s にアタッチします。" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "インスタンス %s: %sのアタッチに失敗しました。リムーブします。" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "Detach volume: ボリューム %s をマウントポイント %s (インスタンス%s)からデタッチします。" + +#: nova/compute/manager.py:497 +#, python-format +msgid 
"Detaching volume from unknown instance %s" +msgstr "ボリュームを未知のインスタンス %s からデタッチします。" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "%s の情報の更新…" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "更新の最中に予期しないエラーが発生しました。" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "ブロックデバイス \"%s\" の統計を \"%s\" について取得できません。" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "インタフェース \"%s\" の統計を \"%s\" について取得できません。" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "接続に際し予期しないエラーが発生しました。" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "インスタンス %s が見つかりました。" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "Request context を空とすることは非推奨です。" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "id %s のserviceが存在しません。" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "%s, %s のserviceが存在しません。" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "アドレス %s の floating ip が存在しません。" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "id %s のinstanceが存在しません。" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "インスタンス %s が見つかりません。" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "ユーザ %s, ネーム%s に該当するキーペアが存在しません。" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "id %s に該当するnetwork が存在しません。" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format 
+msgid "No network for bridge %s" +msgstr "ブリッジ %s に該当する network が存在しません。" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "instance %s に該当する network が存在しません。" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "トークン %s が存在しません。" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "project_id %s に対するクオータが存在しません。" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "id %s に該当するボリュームが存在しません。" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "ボリューム %s が見つかりません。" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "ボリューム %s に関してエクスポートされているデバイスがありません。" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "ボリューム %s に対する target idが存在しません。" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "id %s のセキュリティグループが存在しません。" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "セキュリティグループ名 %s がプロジェクト %s に存在しません。" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "id %s のセキュリティグループルールが存在しません。" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "id %s のユーザが存在しません。" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "アクセスキー %s に該当するユーザが存在しません。" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "id %s のプロジェクトが存在しません。" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "Parallax がHTTPエラー%d を /images に対するリクエストに対して返しました。" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned 
HTTP error %d from request for /images/detail" +msgstr "Parallax がHTTPエラー %d を /images/detail に対するリクエストに対して返しました" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "イメージ %s が見つかりませんでした。" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "アドレスを割り当てようとしましたが、%s のクオータを超えました。" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "アドレスのクオータを超えました。これ以上アドレスを割り当てることはできません。" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "VLANインタフェース %s を開始します。" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "%s 用のブリッジインタフェースを開始します。" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "dnsmasqに対してhupを送信しましたが %s が発生しました。" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d は無効です。dnsmasqを再実行します。" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "dnsmasq をkillしましたが、 %s が発生しました。" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "ネットワークホストの設定をします。" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "IP %s をリースします。" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "IP %s がリースされましたが関連付けられていません。" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "IP %s が期待した mac %s ではなく %s にリースされました。" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "既に割当解除しているIP %s がリースされました。" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "割り当てていないIP %s が開放されました。" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released 
from bad mac %s vs %s" +msgstr "IP %s がmac %s ではない mac %s への割当から開放されました。" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "リースしていないIP %s が開放されました。" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "無効になった %s 個の fixed ip を割当解除しました。" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "未知のS3 value type %r です。" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "認証リクエスト" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "List of buckets が呼ばれました。" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "バケット %s のキーの一覧" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "Unauthorized attempt to access bucket: バケット %s に対するアクセスは許可されていません。" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "バケットを作成します。 %s" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "バケットを削除します。 %s" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "Unauthorized attempt to delete bucket: バケット %s に対する削除は許可されていません。" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "オブジェクトの取得: %s / %s" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" +"Unauthorized attempt to get object: オブジェクト %s のバケット %s からの取得は許可されていません。" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "オブジェクトの格納:: %s / %s" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" +"Unauthorized attempt to upload: オブジェクト %s のバケット %s 
へのアップロードは許可されていません。" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "オブジェクトを削除しています。: %s / %s" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" +"Not authorized to upload image: イメージの格納は許可されていません。ディレクトリ %s は正しくありません。" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" +"Not authorized to upload image: イメージの格納は許可されていません。バケット %s への格納は許可されていません。" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "イメージのアップロードを開始しました。 %s" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "Not authorized to update attributes: イメージ %s のアトリビュートの更新は許可されていません。" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "Toggling publicity flag: イメージ %s の公開フラグを %r に更新します。" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "Updating user fields: イメージ %s のユーザフィールドを更新します。" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "Unauthorized attempt to delete image: イメージ %s の削除は許可されていません。" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "イメージ %s を削除しました。" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "適切なホストが見つかりません。" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "メッセージのcast: %s %s for %s" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "全てのホストにコア数の空きがありません。" 
+ +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "全てのホストが利用可能な容量(gigabytes)に達しています。" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "全てのホストがネットワークの最大数に達しています。" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "インスタンスのテストには実際の仮想環境が必要です。(fakeでは実行できません。)" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "インスタンス %s が実行するまで監視します…" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "インスタンス %s は実行中です。" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "インスタンス %s を終了した後です。" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "ネスとした受信: %s, %s" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "ネストした戻り値: %s" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "%s を受信。" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "ターゲット %s をアロケートしました。" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "ハイパーバイザへの接続に失敗しました。" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "インスタンス %s が見つかりません。" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "In init host" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "VM %s を二重に作成しようとしました。" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "VM %s を開始します。 " + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "VM %s を開始しました。 " + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "vmの生成(spawn)に失敗しました: %s" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to 
create VM %s" +msgstr "VM %s の作成に失敗しました。" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "VM %s を作成します。" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "vm %s のメモリを設定します。" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "vm %s のvcpus を設定します。" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "%s のディスクをディスクファイル %s をアタッチして作成します。" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "VM %s へのディスクドライブの追加に失敗しました。" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "新しいドライブパスは %s です。" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "vhdファイルの VM %s への追加に失敗しました。" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "%s に diskを作成します。" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "%s にNICを作成します。 " + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "外部vswitchへのポート作成に失敗しました。" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "ポート %s の作成に失敗しました。" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "スイッチポート %s をスイッチ %s に作成しました。" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "VM %s に対してNICの追加に失敗しました。" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "%s のNICを作成しました。 " + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "WMIジョブに失敗しました: %s" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "WMIジョブが成功しました: %s, 経過時間=%s " + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got 
request to destroy vm %s" +msgstr "destroy vm %s リクエストを受信しました。" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "vm %s の削除に失敗しました。" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "Del: 削除: disk %s vm %s" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" +"vm %s の情報の取得: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "%s は重複しています。" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "vmの状態の %s から %s への変更に成功しました。" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "VMの状態の %s から %s への変更に失敗しました。" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "%s を取得しました。格納先: %s" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "libvirt %s へ接続します。" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "libvirtへの接続が切れています。" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "インスタンス %s: インスタンスファイル %s を削除しています。" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "%s にディスクが存在しません。" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "インスタンスのスナップショットは現在libvirtに対してはサポートされていません。" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "インスタンス%s: 再起動しました。" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "_wait_for_reboot 失敗: %s" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" 
+msgstr "インスタンス %s: rescued" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "_wait_for_rescue 失敗: %s" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "インスタンス %s を起動中です。" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "インスタンス %s: 起動しました。" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "インスタンス %s の起動に失敗しました。" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "virsh の出力: %r" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "デバイスです。" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "データ:%r ファイルパス: %r" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "ファイル %s の中身: %r" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "インスタンス %s のイメージを生成します。" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "インスタンス %s にキー %s をインジェクトします。" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "インスタンス %s のネットワーク設定をイメージ %s にインジェクトします。" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "インスタンス %s: データをイメージ %s にインジェクトする際にエラーが発生しました。(%s)" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "インスタンス %s: toXML メソッドを開始。" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "インスタンス %s: toXML メソッドを完了。" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and 
xenapi_connection_password to use connection_type=xenapi" +msgstr "" +"connection_type=xenapi を使用するには、以下の指定が必要です: xenapi_connection_url, " +"xenapi_connection_username (オプション), xenapi_connection_password" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "タスク [%s] %s ステータス: success %s" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "タスク [%s] %s ステータス: %s %s" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "例外 %s が発生しました。" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "%s: _db_content => %s" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "NotImplemented 例外を発生させます。" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake には %s が実装されていません。" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "呼び出し: %s %s" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "getter %s をコールします。" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "xenapi.fake に %s に関する実装がないか、引数の数が誤っています。" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "ブリッジ %s に対してブリッジが複数存在します。" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "ブリッジ %s に対するネットワークが存在しません。" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "VM %s を %s として作成しました。" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... 
" +msgstr "VM %s, VDI %s のVBDを作成します… " + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "VBD %s を VM %s, VDI %s に対して作成しました。" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "インスタンス %s のVBDが見つかりません。" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "VBD %s の unplug に失敗しました。" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "VBD %s の削除に失敗しました。" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "VM %s, ネットワーク %s を作成します。" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "VIF %s を VM %s, ネットワーク %s に作成しました。" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "VM %s のスナップショットをラベル '%s' で作成します。" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "スナップショット %s を VM %s について作成しました。" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "xapiに対して %s を '%s' としてアップロードするように指示します。" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "xapi に対して %s を %s として取得するように指示します。" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "PV kernelのvdi %s を取得します。" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "VDIのPV Kernel: %d" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s は依然として存在しています。" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver の vm state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi の power_state -> |%s|" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "VHD %s のペアレントは %s です。" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "SR %s を再スキャンします。" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "ペアレント %s がオリジナルのペアレント %s と一致しません。合致するのを待ちます…" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "VM %s にVDIが存在しません。" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "予期しない数 (%s) のVDIがVM %s に存在します。" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "ユニークではないname %s を作成しようとしました。" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." 
+msgstr "VM %s を開始します…" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "VM %s の生成(spawning) により %s を作成しました。" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "インスタンス%s: ブートしました。" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "インスタンス%s が存在しません。" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "VM %s に対するスナップショットを開始します。" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "%s のスナップショットに失敗しました: %s" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "VM %s のスナップショットとアップロードが完了しました。" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "suspend: インスタンス %s は存在しません。" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "resume: インスタンス %s は存在しません。" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "インスタンス %s が見つかりません。" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "%s を introduce します…" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "%s を %s として introduce しました。" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "Storage Repository を作成できません。" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "VBD %s から SRを取得できません。" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... 
" +msgstr "SR %s をforgetします。 " + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "例外 %s が %s のPBDを取得する際に発生しましたが無視します。" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "例外 %s が %s のPBDをunplugする際に発生しましたが無視します。" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." +msgstr "SR %s のforgetが完了。" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "例外 %s がSR %s をforgetする際に発生しましたが無視します。" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "SR %s のVDIのintroduceができません。" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "VDI %s のレコードを取得できません。" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "SR %s のVDIをintroduceできません。" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "ターゲットの情報を取得できません。 %s, %s" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "マウントポイントを変換できません。 %s" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "Attach_volume: ボリュームのアタッチ: %s, %s, %s" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "SR %s にインスタンス %s のVDIを作成できません。" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "SR %s をインスタンス %s に対して利用できません。" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "インスタンス %s にボリュームをアタッチできません。" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint 
%s attached to instance %s" +msgstr "マウントポイント %s をインスタンス %s にアタッチしました。" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "Detach_volume: ボリュームのデタッチ: %s, %s" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "ボリューム %s のデタッチができません。" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "マウントポイント %s をインスタンス %s からデタッチしました。" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "%sのクオータを超えています。サイズ %sG のボリュームの作成を行おうとしました。" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "ボリュームのクオータを超えています。%sの大きさのボリュームは作成できません。" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "ボリュームのステータス(status)が available でなければなりません。" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "ボリュームは既にアタッチされています(attached)。" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "ボリュームは既にデタッチされています(detached)。" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "実行失敗からリカバリーします。%s 回目のトライ。" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "偽のAOE: %s" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "偽のISCSI: %s" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "ボリューム%sを作成します。" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "ボリューム%sの%sGのlv (論理ボリューム) を作成します。" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "ボリューム %s をエクスポートします。" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "ボリューム %s の作成に成功しました。" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "ボリュームはアタッチされたままです。" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "ボリュームはこのノードのローカルではありません。" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "ボリューム %s のエクスポートを解除します。" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "ボリューム %s を削除します。" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "ボリューム %s の削除に成功しました。" diff --git a/locale/pt_BR.po b/locale/pt_BR.po new file mode 100644 index 000000000..a58ccc182 --- /dev/null +++ b/locale/pt_BR.po @@ -0,0 +1,2148 @@ +# Brazilian Portuguese translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-13 18:44+0000\n" +"Last-Translator: Gustavo Morozowski \n" +"Language-Team: Brazilian Portuguese \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:21+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nome do arquivo da CA raiz" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nome do arquivo da chave privada" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Nome de arquivo da Lista de Revogação de Certificados" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Aonde armazenamos nossas chaves" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Aonde mantemos nosso CA raiz" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Devemos usar um CA para cada projeto?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Sujeito do certificado para usuários, %s para projeto, usuário, timestamp" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Sujeito do certificado para projetos, %s para projeto, timestamp" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "Sujeito do certificado para vpns, %s para projeto, timestamp" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Erro inesperado ao executar o comando." 
+ +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Comando: %s\n" +"Código de retorno: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Exceção não capturada" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) publicar (key: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "Publicando para rota %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Declarando fila %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "Atribuindo %s para %s com chave %s" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Obtendo de %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"Servidor AMQP em %s:%d inatingível. Tentando novamente em %d segundos." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Não foi possível conectar ao servidor AMQP após %d tentativas. Desligando." 
+ +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Reconectado à fila" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Falha ao obter mensagem da fila" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "Iniciando o Adaptador Consumidor para %s" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "recebido %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "sem método para mensagem: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Sem método para mensagem: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "conteúdo descompactado: %s" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Fazendo chamada assíncrona..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "resposta %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "topico é %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "mensagem %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Iniciando nó %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "Encerrado serviço que não tem entrada na base de dados" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "O objeto da base de dados do serviço desapareceu, Recriando." + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "Recuperada conexão servidor de modelo." 
+ +#: nova/service.py:208 +msgid "model server went away" +msgstr "servidor de modelo perdido" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" +"Repositório de dados %s não pode ser atingido. Tentando novamente em %d " +"segundos." + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Servindo %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de FLAGS:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" +"Arquivo de id de processo (pidfile) %s não existe. Daemon não está " +"executando?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Iniciando %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Exceção interna: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "Classe %s não pode ser encontrada" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "Obtendo %s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Executando comando (subprocesso): %s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "Resultado foi %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "debug em callback: %s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "Executando %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "Não foi possível obter IP, usando 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend inválido: %s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." 
+msgstr "Muitas falhas de autenticação." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." +msgstr "" +"Chave de acesso %s tem %d falhas de autenticação e vai ser bloqueada por %d " +"minutos." + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Falha de Autenticação: %s" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "Pedido de Autenticação Para: %s:%s" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "ação: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "argumento: %s\t\tvalor: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "Requisição não autorizada para controlador=%s e ação=%s" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "NotFound lançado: %s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "ApiError lançado: %s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "Erro inexperado lançado: %s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Ocorreu um erro desconhecido. Por favor tente sua requisição novamente." 
 + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Criando novo usuário: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Excluindo usuário: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Adicionando papel %s ao usuário %s para o projeto %s" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "Adicionando papel em todo site %s ao usuário %s" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Removendo papel %s do usuário %s para o projeto %s" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "Removendo papel %s em todo site do usuário %s" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "operação deve ser add ou remove" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "Obtendo x509 para usuário: %s do projeto: %s" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Criar projeto %s gerenciado por %s" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Excluir projeto: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Adicionando usuário %s ao projeto %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Excluindo usuário %s do projeto %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "Requisição de API não suportada: controlador = %s,ação = %s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid 
"Generating root CA: %s" +msgstr "Gerando CA raiz: %s" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Criar par de chaves %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "Remover par de chaves %s" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s não é um ipProtocol válido" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revogado entrada do grupo de segurança %s" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "Não existe regra para os parâmetros especificados" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Autorizada entrada do grupo de segurança %s" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Esta regra já existe no grupo %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "Criar Grupo de Segurança %s" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "group %s já existe" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Excluir grupo de segurança %s" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obter saída do console para instância %s" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Criar volume de %s GB" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "Anexar volume %s para instância %s em %s" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "Desanexar volume %s" + +#: 
nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "Alocar endereço" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "Liberar endereço %s" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "Atribuir endereço %s à instância %s" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "Desatribuir endereço %s" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "Começando a terminar instâncias" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "Reiniciar instância %r" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "Removendo o registro da imagem %s" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "Registrada imagem %s com id %s" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "atributo não suportado: %s" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "id inválido: %s" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "usuário ou grupo não especificado" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "apenas o grupo \"all\" é suportado" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "operation_type deve ser add ou remove" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "Atualizando publicidade da imagem %s" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado o erro: %s" + +#: nova/api/openstack/__init__.py:86 +msgid 
"Including admin operations in API." +msgstr "Incluindo operações administrativas na API." + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "Usuário %s já existe" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "Projeto não pode ser criado porque o gerente %s não existe." + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "Projeto não pode ser criado porque o projeto %s já existe." + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "Projeto não pode ser modificado porque o gerente %s não existe." 
 + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Usuário \"%s\" não encontrado" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Projeto \"%s\" não encontrado" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Tentativa de instanciar singleton" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "Objeto LDAP para %s não existe" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "Projeto não pode ser criado porque o usuário %s não existe" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "Usuário %s já pertence ao grupo %s" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Tentativa de remover o último membro de um grupo. Ao invés disso excluindo o " +"grupo %s." 
+ +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "Grupo no dn %s não existe" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "Procurando usuário: %r" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Falha de autorização para chave de acesso %s" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "Nenhum usuário encontrado para chave de acesso %s" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Usando nome do projeto = nome do usuário (%s)" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "falha de autorização: nenhum projeto de nome %s (usuário=%s)" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "Nenhum projeto chamado %s pode ser encontrado." 
+ +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" +"Falha de autorização: usuário %s não é administrador nem membro do projeto %s" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "Usuário %s não é membro do projeto %s" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Assinatura inválida para usuário %s" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "Assinatura não confere" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "Deve especificar projeto" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "O papel %s não foi encontrado" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "O papel %s é apenas global" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Adicionando papel %s ao usuário %s no projeto %s" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Removendo papel %s do usuário %s no projeto %s" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Criado projeto %s com gerente %s" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "modificando projeto %s" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Remover usuário %s do projeto %s" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Excluindo projeto %s" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Criado usuário %s (administrador: %r)" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user 
%s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. 
Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr 
"" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: 
nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: 
nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a 
fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM 
%s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + 
+#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: 
Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found 
non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/locale/ru.po b/locale/ru.po new file mode 100644 index 000000000..c751f41b2 --- /dev/null +++ b/locale/ru.po @@ -0,0 +1,2136 @@ +# Russian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-25 17:45+0000\n" +"Last-Translator: Ilya Alekseyev \n" +"Language-Team: Russian \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Имя файла секретного ключа" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Неожиданная ошибка при выполнении команды." 
+ +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Команда: %s\n" +"Код завершения: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Необработанное исключение" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "Объявление очереди %s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "Объявление точки обмена %s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "AMQP сервер %s:%d недоступен. Повторная попытка через %d секунд." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "Не удалось подключиться к серверу AMQP после %d попыток. Выключение." 
+ +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "Переподлючено к очереди" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "Не удалось получить сообщение из очереди" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "получено %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "не определен метод для сообщения: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Не определен метод для сообщения: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Выполняется асинхронный вызов..." + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "тема %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "сообщение %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "Запускается нода %s" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "Хранилище данных %s недоступно. Повторная попытка через %d секунд." 
+ +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "pidfile %s не обнаружен. Демон не запущен?\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Запускается %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "Вложенное исключение: %s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "Класс %s не найден" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "Результат %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "Не удалось получить IP, используем 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "Слишком много неудачных попыток аутентификации." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "действие: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "arg: %s\t\tval: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос." 
+ +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "Создание нового пользователя: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "Удаление пользователя: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "Добавление роли %s для пользователя %s для проекта %s" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "Удаление роли %s пользователя %s для проекта %s" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "Создать проект %s под управлением %s" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "Удалить проект: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Добавление пользователя %s к проекту %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Удаление пользователя %s с проекта %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "Создание пары ключей %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair 
%s" +msgstr "Удаление пары ключей %s" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "Неверный диапазон портов" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Это правило уже существует в группе %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "группа %s уже существует" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Создание раздела %s ГБ" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: 
nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "аттрибут не поддерживается: %s" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "не указан пользователь или группа" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "Пользователь %s уже существует" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "Проект не может быть создан поскольку менеджер %s не существует" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "Проект не может быть созан поскольку проект %s уже существует" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Пользователь \"%s\" не существует" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Проект \"%s\" не найден" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "Объект LDAP %s не существует" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s 
doesn't exist" +msgstr "Проект не может быть создан поскольку пользователь %s не существует" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "Пользователь %s уже член группы %s" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "Пользователь %s не является членом группы %s" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Не допустимая подпись для пользователя %s" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "Подпись не совпадает" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "Необходимо указать проект" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "Роль %s не может быть найдена" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s 
role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "Добавление роли %s для пользователя %s в проект %s" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "Удаление роли %s пользователя %s в проекте %s" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "Создан проект %s под управлением %s" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "изменение проекта %s" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "Удалить пользователя %s из проекта %s" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "Удаление проекта %s" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "Создан пользователь %s (администратор: %r)" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "Удаление пользователя %s" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "Нет vpn данных для проекта %s" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "Запуск VPN для %s" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not 
found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" 
+msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Ошибка монтирования файловой системы: %s" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr 
"" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "обновление %s..." + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "неожиданная ошибка во время обновления" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network 
for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: 
nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "Получение объекта: %s / %s" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "Вставка объекта: %s / %s" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "Удаление объекта: %s / %s" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "Удаленное изображение: %s" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 
nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "Получено %s" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "Запускается VM %s " + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "Запущен VM %s " + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: 
nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "Создан диск для %s" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" 
+"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "Нет диска в %s" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, 
python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "%s: _db_content => %s" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "Звонок %s %s" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: 
nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/locale/uk.po b/locale/uk.po new file mode 100644 index 000000000..cdbffd130 --- /dev/null +++ b/locale/uk.po @@ -0,0 +1,2130 @@ +# Ukrainian translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-13 07:03+0000\n" +"Last-Translator: Wladimir Rossinski \n" +"Language-Team: Ukrainian \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "AMQP сервер %s:%d недоступний. Спроба під'єднання через %d секунд." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "Не вдалось під'єднатися до серверу AMQP після %d спроб. Вимкнення." + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "отримано %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "без порядку для повідомлень: %s" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "Без порядку для повідомлень: %s" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "Створення асинхронного виклику..." 
+ +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "відповідь %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "заголовок %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "повідомлення %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "Обслуговування %s" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "Запускається %s" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "Запускається %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "Не вдалось отримати IP, використовуючи 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "Занадто багато невдалих аутентифікацій." + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format 
+msgid "Delete project: %s" +msgstr "Вилучити проект: %s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "Долучення користувача %s до проекту %s" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "Вилучення користувача %s з проекту %s" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s не допустимий ipProtocol" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "Невірний діапазон портів" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Це правило вже існує в групі %s" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "Вилучити групу безпеки %s" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "Створити розділ на %s ГБ" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "Від'єднати том %s" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 
+msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "лише група \"всі\" підтримується" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "Користувач %s вже існує" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, 
python-format +msgid "User \"%s\" not found" +msgstr "Користувач \"%s\" не знайдено" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Проект \"%s\" не знайдено" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr 
"" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, 
python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" 
+msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr 
"" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: 
nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: 
nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a 
fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM 
%s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + 
+#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: 
Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found 
non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" diff --git a/locale/zh_CN.po b/locale/zh_CN.po new file mode 100644 index 000000000..4805eb327 --- /dev/null +++ b/locale/zh_CN.po @@ -0,0 +1,2135 @@ +# Chinese (Simplified) translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-01-22 03:11+0000\n" +"Last-Translator: combo \n" +"Language-Team: Chinese (Simplified) \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-01-28 05:21+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "正在启动 %s" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "根证书文件名" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "私钥文件名" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "保存密钥的位置" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "保存根证书的位置" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "是否所有项目都是用证书授权(CA)?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "用户证书的标题,%s依次分别为项目,用户,时间戳" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "项目证书的标题,%s依次分别为项目,时间戳" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "VPN证书的标题,%s依次分别为项目,时间戳" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "Flag所在路径:%s" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "运行命令时出现了意外错误。" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"命令:%s\n" +"退出代码:%s\n" +"标准输出(stdout):%r\n" +"标准错误(stderr):%r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "未捕获异常" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s)发布(键值:%s)%s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "发布并路由到 %s" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "正在声明队列%s" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "正在声明交换(exchange)%s" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "将%s绑定到%s(以%s键值)" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "从%s获得如下内容:%s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "位于%s:%d的AMQP服务器不可用。%d秒后重试。" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "已尝试%d次,均无法连接到AMQP服务器。关闭中。" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "重新与队列建立连接" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "从队列获取数据失败" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "已接收 %s" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "没有适用于消息%s的方法" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "没有适用于消息%s的方法" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "返回%s异常给调用者" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." 
+msgstr "产生异步调用中……" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "消息ID(MSG_ID)是 %s" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "回复 %s" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "话题是 %s" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "消息 %s" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "启动%s节点" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "因无数据库记录,服务已被中止" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "与模型服务器(model server)的连接已恢复!" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "失去与模型服务器的连接" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "数据储存服务%s不可用。%d秒之后继续尝试。" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "正在为%s服务" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "FLAGS全集:" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "pidfile %s不存在。后台服务没有运行?\n" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "内层异常:%s" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "无法找到%s类" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "正在抓取%s" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "正在运行(在子进程中)运行命令:%s" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "运行结果为 %s" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "回调中debug:%s" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "正在运行 %s" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "不能获取IP,将使用 127.0.0.1 %s" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "无效的后台:%s" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "后台 %s" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "较多失败的认证" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "访问键 %s时,存在%d个失败的认证,将于%d分钟后解锁" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "认证失败:%s" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "为%s:%s申请认证" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "执行: %s" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "键为: %s\t\t值为: %s" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "对于控制器=%s和执行=%s的请求,未审核" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "引起没有找到的错误: %s" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "引发了Api错误: %s" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "引发了未知的错误: %s" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "发生了一个未知的错误. 请重试你的请求." 
+ +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "创建新用户: %s" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "删除用户: %s" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "增加角色 %s给用户 %s,在工程 %s中" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "增加站点范围的 %s角色给用户 %s" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "移除角色 %s从用户 %s中,在工程 %s" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "移除站点范围的 %s角色从用户 %s中" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "操作必须为增加或删除" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "为用户 %s从工程%s中获取 x509" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "创建工程%s,此工程由%s管理" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "删除工程%s" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "增加用户%s到%s工程" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "移除用户%s从工程%s中" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "不支持的API请求: 控制器 = %s,执行 = %s" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "创建键值对 %s" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" 
+msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." +msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 
nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 
nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" 
+msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format 
+msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" 
+msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr 
"" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: 
nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: 
nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a 
fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM 
%s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + 
+#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: 
Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found 
non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" -- cgit From eabc4c00eea8859c37efed3f180edbc41fd3b71d Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 27 Jan 2011 23:53:28 -0600 Subject: Working on api / manager / db support for zones --- bin/nova-manage | 10 ++++++++++ nova/api/openstack/__init__.py | 5 +++++ nova/db/api.py | 8 ++++++++ nova/db/sqlalchemy/api.py | 9 +++++++++ 4 files changed, 32 
insertions(+) diff --git a/bin/nova-manage b/bin/nova-manage index 7835ca551..b62687aec 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -569,6 +569,15 @@ class DbCommands(object): print migration.db_version() +class ZoneCommands(object): + """Methods for defining zones.""" + + def create(self, name): + """Create a new Zone for this deployment.""" + ctxt = context.get_admin_context() + db.create_zone(ctxt, name) + + class VolumeCommands(object): """Methods for dealing with a cloud in an odd state""" @@ -620,6 +629,7 @@ CATEGORIES = [ ('service', ServiceCommands), ('log', LogCommands), ('db', DbCommands), + ('zone', ZoneCommands), ('volume', VolumeCommands)] diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index c70bb39ed..025fa12a4 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -34,6 +34,7 @@ from nova.api.openstack import flavors from nova.api.openstack import images from nova.api.openstack import servers from nova.api.openstack import shared_ip_groups +from nova.api.openstack import zones LOG = logging.getLogger('nova.api.openstack') @@ -79,6 +80,10 @@ class APIRouter(wsgi.Router): server_members["actions"] = "GET" server_members['suspend'] = 'POST' server_members['resume'] = 'POST' + + mapper.resource("zone", "zones", controller=zones.Controller(), + collection={'detail': 'GET'}, + member=zone_members) mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, diff --git a/nova/db/api.py b/nova/db/api.py index 789cb8ebb..dc35f20b2 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -980,3 +980,11 @@ def console_get_all_by_instance(context, instance_id): def console_get(context, console_id, instance_id=None): """Get a specific console (possibly on a given instance).""" return IMPL.console_get(context, console_id, instance_id) + + +#################### + + +def create_zone(context, name): + """Create a new Zone entry for this deployment.""" + return 
IMPL.create_zone(context, name) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 895e7eabe..ec36c481e 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1996,3 +1996,12 @@ def console_get(context, console_id, instance_id=None): raise exception.NotFound(_("No console with id %(console_id)s" " %(idesc)s") % locals()) return result + + +################## + + +@require_admin_context +def create_zone(context, zone): + session = get_session() + print "Creating Zone", zone -- cgit From c021c985660aa37861b6c01bba9db914f349d13d Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Tue, 1 Feb 2011 05:19:59 +0000 Subject: Launchpad automatic translations update. --- locale/ru.po | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/locale/ru.po b/locale/ru.po index c751f41b2..6a75c1727 100644 --- a/locale/ru.po +++ b/locale/ru.po @@ -8,13 +8,13 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-25 17:45+0000\n" -"Last-Translator: Ilya Alekseyev \n" +"PO-Revision-Date: 2011-01-31 06:53+0000\n" +"Last-Translator: Andrey Olykainen \n" "Language-Team: Russian \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Launchpad-Export-Date: 2011-02-01 05:19+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 @@ -31,7 +31,7 @@ msgstr "" #: nova/crypto.py:53 msgid "Where we keep our keys" -msgstr "" +msgstr "Путь к ключам" #: nova/crypto.py:55 msgid "Where we keep our root CA" @@ -112,7 +112,7 @@ msgstr "" #: nova/fakerabbit.py:120 #, python-format msgid "Getting from %s: %s" -msgstr "" +msgstr "Получение из %s: %s" #: nova/rpc.py:92 #, python-format @@ -174,7 +174,7 @@ msgstr "MSG_ID is %s" #: nova/rpc.py:356 #, python-format msgid "response %s" 
-msgstr "" +msgstr "ответ %s" #: nova/rpc.py:365 #, python-format @@ -264,7 +264,7 @@ msgstr "" #: nova/utils.py:176 #, python-format msgid "Running %s" -msgstr "" +msgstr "Выполняется %s" #: nova/utils.py:207 #, python-format @@ -291,16 +291,18 @@ msgid "" "Access key %s has had %d failed authentications and will be locked out for " "%d minutes." msgstr "" +"Ключ доступа %s имеет %d неудачных попыток аутентификации и будет " +"заблокирован на %d минут." #: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 #, python-format msgid "Authentication Failure: %s" -msgstr "" +msgstr "Ошибка аутентификации: %s" #: nova/api/ec2/__init__.py:190 #, python-format msgid "Authenticated Request For %s:%s)" -msgstr "" +msgstr "Запрос аутентификации для %s:%s)" #: nova/api/ec2/__init__.py:227 #, python-format @@ -547,7 +549,7 @@ msgstr "" #: nova/api/ec2/metadatarequesthandler.py:75 #, python-format msgid "Failed to get metadata for ip: %s" -msgstr "" +msgstr "Ошибка получения метаданных для ip: %s" #: nova/api/openstack/__init__.py:70 #, python-format -- cgit From 620eba09a96f25a059249c23a5e73efd18aaf89a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 1 Feb 2011 14:11:21 -0600 Subject: forgot context param for network_get_all --- nova/virt/xenapi/vmops.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 68fa1ecd6..da2e5c672 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -95,9 +95,10 @@ class VMOps(object): VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) # write network info - network = db.network_get_by_instance(context.get_admin_context(), + admin_context = context.get_admin_context() + network = db.network_get_by_instance(admin_context, instance['id']) - for network in db.network_get_all(): + for network in db.network_get_all(admin_context): mapping = {'label': network['label'], 'gateway': network['gateway'], 'mac': instance.mac_address, -- 
cgit From 0e6b1c02b3ae82526f3cf83ce70213e7a107701d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 1 Feb 2011 15:41:53 -0600 Subject: added to inject networking data into the xenstore --- nova/virt/xenapi/vmops.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index da2e5c672..6edeae5c0 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -99,14 +99,16 @@ class VMOps(object): network = db.network_get_by_instance(admin_context, instance['id']) for network in db.network_get_all(admin_context): + mac_id = instance.mac_address.replace(':', '') + location = 'vm-data/networking/%s' % mac_id mapping = {'label': network['label'], 'gateway': network['gateway'], 'mac': instance.mac_address, - 'dns': network['dns'], + 'dns': [network['dns']], 'ips': [{'netmask': network['netmask'], 'enabled': '1', 'ip': '192.168.3.3'}]} # <===== CHANGE!!!! - self.write_network_config_to_xenstore(vm_ref, mapping) + self.write_to_param_xenstore(vm_ref, {location: mapping}) bridge = network['bridge'] network_ref = \ @@ -392,10 +394,6 @@ class VMOps(object): args = {'id': str(uuid.uuid4())} resp = self._make_agent_call('resetnetwork', vm, '', args) - def write_network_config_to_xenstore(self, instance, mapping): - vm = self._get_vm_opaque_ref(instance) - self.write_to_param_xenstore(vm, mapping) - def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records from 'path' downward. Returns a dict with the sub-paths as keys, -- cgit From 60891ed6f3a978ce77575e8710b695aa9828adcc Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Fri, 4 Feb 2011 05:31:40 +0000 Subject: Launchpad automatic translations update. 
--- locale/ast.po | 2 +- locale/da.po | 2 +- locale/es.po | 2 +- locale/it.po | 2 +- locale/ja.po | 2 +- locale/pt_BR.po | 16 ++++++++-------- locale/ru.po | 2 +- locale/uk.po | 25 +++++++++++++++---------- locale/zh_CN.po | 2 +- 9 files changed, 30 insertions(+), 25 deletions(-) diff --git a/locale/ast.po b/locale/ast.po index c887bbc91..310f299be 100644 --- a/locale/ast.po +++ b/locale/ast.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/da.po b/locale/da.po index 524b27a64..ac1b593b0 100644 --- a/locale/da.po +++ b/locale/da.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/es.po b/locale/es.po index a1cf5b7f6..28f10c481 100644 --- a/locale/es.po +++ b/locale/es.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/it.po b/locale/it.po index f2f6a6b87..f08497bac 100644 --- a/locale/it.po +++ b/locale/it.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/ja.po b/locale/ja.po index 919625e9a..b11bb67b0 100644 --- 
a/locale/ja.po +++ b/locale/ja.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/pt_BR.po b/locale/pt_BR.po index a58ccc182..c778bb631 100644 --- a/locale/pt_BR.po +++ b/locale/pt_BR.po @@ -8,13 +8,13 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-13 18:44+0000\n" -"Last-Translator: Gustavo Morozowski \n" +"PO-Revision-Date: 2011-02-03 20:32+0000\n" +"Last-Translator: André Gondim \n" "Language-Team: Brazilian Portuguese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:21+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 @@ -60,7 +60,7 @@ msgstr "Sujeito do certificado para vpns, %s para projeto, timestamp" #: nova/crypto.py:258 #, python-format msgid "Flags path: %s" -msgstr "" +msgstr "Caminho da sinalização: %s" #: nova/exception.py:33 msgid "Unexpected error while running command." 
@@ -103,7 +103,7 @@ msgstr "Declarando fila %s" #: nova/fakerabbit.py:89 #, python-format msgid "Declaring exchange %s" -msgstr "" +msgstr "Declarando troca %s" #: nova/fakerabbit.py:95 #, python-format @@ -432,7 +432,7 @@ msgstr "%s não é um ipProtocol válido" #: nova/api/ec2/cloud.py:361 msgid "Invalid port range" -msgstr "" +msgstr "Intervalo de porta inválido" #: nova/api/ec2/cloud.py:392 #, python-format @@ -767,7 +767,7 @@ msgstr "Criado usuário %s (administrador: %r)" #: nova/auth/manager.py:645 #, python-format msgid "Deleting user %s" -msgstr "" +msgstr "Apagando usuário %s" #: nova/auth/manager.py:655 #, python-format @@ -804,7 +804,7 @@ msgstr "" #: nova/cloudpipe/pipelib.py:97 #, python-format msgid "Launching VPN for %s" -msgstr "" +msgstr "Executando VPN para %s" #: nova/compute/api.py:67 #, python-format diff --git a/locale/ru.po b/locale/ru.po index 6a75c1727..fc97d5603 100644 --- a/locale/ru.po +++ b/locale/ru.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-01 05:19+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/uk.po b/locale/uk.po index cdbffd130..be2371dbc 100644 --- a/locale/uk.po +++ b/locale/uk.po @@ -8,13 +8,13 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-13 07:03+0000\n" +"PO-Revision-Date: 2011-02-03 22:02+0000\n" "Last-Translator: Wladimir Rossinski \n" "Language-Team: Ukrainian \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:20+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 @@ -23,7 +23,7 @@ msgstr "" #: nova/crypto.py:49 msgid "Filename of private 
key" -msgstr "" +msgstr "Ім'я файлу секретного ключа" #: nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" @@ -31,7 +31,7 @@ msgstr "" #: nova/crypto.py:53 msgid "Where we keep our keys" -msgstr "" +msgstr "Шлях до збережених ключів" #: nova/crypto.py:55 msgid "Where we keep our root CA" @@ -63,7 +63,7 @@ msgstr "" #: nova/exception.py:33 msgid "Unexpected error while running command." -msgstr "" +msgstr "Неочікувана помилка при виконанні команди." #: nova/exception.py:36 #, python-format @@ -74,10 +74,15 @@ msgid "" "Stdout: %r\n" "Stderr: %r" msgstr "" +"%s\n" +"Команда: %s\n" +"Код завершення: %s\n" +"Stdout: %r\n" +"Stderr: %r" #: nova/exception.py:86 msgid "Uncaught exception" -msgstr "" +msgstr "Необроблене виключення" #: nova/fakerabbit.py:48 #, python-format @@ -92,12 +97,12 @@ msgstr "" #: nova/fakerabbit.py:83 #, python-format msgid "Declaring queue %s" -msgstr "" +msgstr "Оголошення черги %s" #: nova/fakerabbit.py:89 #, python-format msgid "Declaring exchange %s" -msgstr "" +msgstr "Оголошення точки обміну %s" #: nova/fakerabbit.py:95 #, python-format @@ -107,7 +112,7 @@ msgstr "" #: nova/fakerabbit.py:120 #, python-format msgid "Getting from %s: %s" -msgstr "" +msgstr "Отримання з %s: %s" #: nova/rpc.py:92 #, python-format @@ -121,7 +126,7 @@ msgstr "Не вдалось під'єднатися до серверу AMQP п #: nova/rpc.py:118 msgid "Reconnected to queue" -msgstr "" +msgstr "Оновлено з'єднання до черги" #: nova/rpc.py:125 msgid "Failed to fetch message from queue" diff --git a/locale/zh_CN.po b/locale/zh_CN.po index 4805eb327..feee54cfe 100644 --- a/locale/zh_CN.po +++ b/locale/zh_CN.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-01-28 05:21+0000\n" +"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/twistd.py:268 -- cgit From e283bd21babc245f691e3ca394c5c2b2484a4022 Mon Sep 17 
00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Sat, 5 Feb 2011 05:36:48 +0000 Subject: Launchpad automatic translations update. --- locale/ast.po | 2 +- locale/da.po | 2 +- locale/es.po | 2 +- locale/it.po | 2 +- locale/ja.po | 2 +- locale/pt_BR.po | 2 +- locale/ru.po | 2 +- locale/uk.po | 2 +- locale/zh_CN.po | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/locale/ast.po b/locale/ast.po index 310f299be..6e224f235 100644 --- a/locale/ast.po +++ b/locale/ast.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/da.po b/locale/da.po index ac1b593b0..f845f11b0 100644 --- a/locale/da.po +++ b/locale/da.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/es.po b/locale/es.po index 28f10c481..8d4f90b26 100644 --- a/locale/es.po +++ b/locale/es.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/it.po b/locale/it.po index f08497bac..3f439f9dd 100644 --- a/locale/it.po +++ b/locale/it.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad 
(build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/ja.po b/locale/ja.po index b11bb67b0..2cea24640 100644 --- a/locale/ja.po +++ b/locale/ja.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/pt_BR.po b/locale/pt_BR.po index c778bb631..e57f7304a 100644 --- a/locale/pt_BR.po +++ b/locale/pt_BR.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/ru.po b/locale/ru.po index fc97d5603..5d031ac08 100644 --- a/locale/ru.po +++ b/locale/ru.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/uk.po b/locale/uk.po index be2371dbc..f3e217690 100644 --- a/locale/uk.po +++ b/locale/uk.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" +"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 diff --git a/locale/zh_CN.po b/locale/zh_CN.po index feee54cfe..6bc231e50 100644 --- a/locale/zh_CN.po +++ b/locale/zh_CN.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-04 05:31+0000\n" 
+"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/twistd.py:268 -- cgit From 96ea3dd3db826440a7b52d32fa1663c17aa8394e Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Mon, 7 Feb 2011 05:52:49 +0000 Subject: Launchpad automatic translations update. --- locale/cs.po | 2130 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 2130 insertions(+) create mode 100644 locale/cs.po diff --git a/locale/cs.po b/locale/cs.po new file mode 100644 index 000000000..b9403687a --- /dev/null +++ b/locale/cs.po @@ -0,0 +1,2130 @@ +# Czech translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-02-07 04:36+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Czech \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-07 05:52+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" 
+msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." 
+msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format 
+msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group 
\"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: 
nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, 
python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, 
python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid 
"check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, 
python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: 
nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: 
nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: 
nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, 
python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid 
"Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format 
+msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 
+#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, 
python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" -- cgit From 41e615b843c284631a0d878db2c93ef97f2eb4b8 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 7 Feb 2011 14:46:54 -0400 Subject: minor --- nova/api/openstack/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 025fa12a4..8901a8987 100644 --- 
a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -83,7 +83,7 @@ class APIRouter(wsgi.Router): mapper.resource("zone", "zones", controller=zones.Controller(), collection={'detail': 'GET'}, - member=zone_members) + collection_name='zones') mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, -- cgit From b6022c1f7d7dc9294f6b1b613c7e99bd9437a72e Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 7 Feb 2011 13:43:23 -0600 Subject: added network_get_all_by_instance(), call to reset_network in vmops --- nova/db/sqlalchemy/api.py | 19 +++++++++++++------ nova/virt/xenapi/vmops.py | 11 +++++++---- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 31865d553..26b685e43 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1055,12 +1055,6 @@ def network_get(context, network_id, session=None): return result -@require_context -def network_get_all(context): - session = get_session() - return session.query(models.Network).all() - - # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable-msg=C0103 @@ -1104,6 +1098,19 @@ def network_get_by_instance(_context, instance_id): return rv +@require_admin_context +def network_get_all_by_instance(_context, instance_id): + session = get_session() + rv = session.query(models.Network).\ + filter_by(deleted=False).\ + join(models.Network.fixed_ips).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False) + if not rv: + raise exception.NotFound(_('No network for instance %s') % instance_id) + return rv + + @require_admin_context def network_set_host(context, network_id, host_id): session = get_session() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6edeae5c0..4056e99bc 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -96,9 +96,11 @@ 
class VMOps(object): # write network info admin_context = context.get_admin_context() - network = db.network_get_by_instance(admin_context, - instance['id']) - for network in db.network_get_all(admin_context): + #network = db.network_get_by_instance(admin_context, + # instance['id']) + + for network in db.network_get_all_by_instance(admin_context, + instance['id']): mac_id = instance.mac_address.replace(':', '') location = 'vm-data/networking/%s' % mac_id mapping = {'label': network['label'], @@ -119,6 +121,7 @@ class VMOps(object): network_ref, instance.mac_address) # call reset networking + self.reset_network(vm_ref) LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) @@ -389,7 +392,7 @@ class VMOps(object): # TODO: implement this! return 'http://fakeajaxconsole/fake_url' - def reset_networking(self, instance): + def reset_network(self, instance): vm = self._get_vm_opaque_ref(instance) args = {'id': str(uuid.uuid4())} resp = self._make_agent_call('resetnetwork', vm, '', args) -- cgit From 25f96558743a0dd10dfa82d1e5f463c0ed5ccfaa Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Tue, 8 Feb 2011 05:28:14 +0000 Subject: Launchpad automatic translations update. 
--- locale/cs.po | 51 +++++++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/locale/cs.po b/locale/cs.po index b9403687a..861efa37e 100644 --- a/locale/cs.po +++ b/locale/cs.po @@ -8,22 +8,22 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-02-07 04:36+0000\n" -"Last-Translator: FULL NAME \n" +"PO-Revision-Date: 2011-02-07 12:45+0000\n" +"Last-Translator: David Pravec \n" "Language-Team: Czech \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-07 05:52+0000\n" +"X-Launchpad-Export-Date: 2011-02-08 05:28+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 msgid "Filename of root CA" -msgstr "" +msgstr "Jméno souboru kořenové CA" #: nova/crypto.py:49 msgid "Filename of private key" -msgstr "" +msgstr "Jméno souboru s privátním klíčem" #: nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" @@ -31,15 +31,15 @@ msgstr "" #: nova/crypto.py:53 msgid "Where we keep our keys" -msgstr "" +msgstr "Adresář, do kterého ukládáme naše klíče" #: nova/crypto.py:55 msgid "Where we keep our root CA" -msgstr "" +msgstr "Adresář, do kterého ukládáme naši kořenovou CA" #: nova/crypto.py:57 msgid "Should we use a CA for each project?" -msgstr "" +msgstr "Použijeme CA pro každý projekt?" #: nova/crypto.py:61 #, python-format @@ -63,7 +63,7 @@ msgstr "" #: nova/exception.py:33 msgid "Unexpected error while running command." 
-msgstr "" +msgstr "Při spouštění příkazu došlo k nečekané chybě" #: nova/exception.py:36 #, python-format @@ -74,10 +74,15 @@ msgid "" "Stdout: %r\n" "Stderr: %r" msgstr "" +"%s\n" +"Příkaz: %s\n" +"Vrácená hodnota: %s\n" +"Stdout: %r\n" +"Stderr: %r" #: nova/exception.py:86 msgid "Uncaught exception" -msgstr "" +msgstr "Neošetřená výjimka" #: nova/fakerabbit.py:48 #, python-format @@ -112,20 +117,22 @@ msgstr "" #: nova/rpc.py:92 #, python-format msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -msgstr "" +msgstr "AMQP server na %s:%d není dosažitelný. Zkusím znovu za %d sekund." #: nova/rpc.py:99 #, python-format msgid "Unable to connect to AMQP server after %d tries. Shutting down." msgstr "" +"Nepodařilo se připojit k AMQP serveru ani po %d pokusech. Tento proces bude " +"ukončen." #: nova/rpc.py:118 msgid "Reconnected to queue" -msgstr "" +msgstr "Znovu připojeno k AMQP frontě" #: nova/rpc.py:125 msgid "Failed to fetch message from queue" -msgstr "" +msgstr "Selhalo získání zprávy z AMQP fronty" #: nova/rpc.py:155 #, python-format @@ -135,41 +142,41 @@ msgstr "" #: nova/rpc.py:170 #, python-format msgid "received %s" -msgstr "" +msgstr "získáno: %s" #: nova/rpc.py:183 #, python-format msgid "no method for message: %s" -msgstr "" +msgstr "Není metoda pro zpracování zprávy: %s" #: nova/rpc.py:184 #, python-format msgid "No method for message: %s" -msgstr "" +msgstr "Není metoda pro zpracování zprávy: %s" #: nova/rpc.py:245 #, python-format msgid "Returning exception %s to caller" -msgstr "" +msgstr "Volajícímu je vrácena výjimka: %s" #: nova/rpc.py:286 #, python-format msgid "unpacked context: %s" -msgstr "" +msgstr "rozbalený obsah: %s" #: nova/rpc.py:305 msgid "Making asynchronous call..." -msgstr "" +msgstr "Volání asynchronní funkce..." 
#: nova/rpc.py:308 #, python-format msgid "MSG_ID is %s" -msgstr "" +msgstr "MSG_ID je %s" #: nova/rpc.py:356 #, python-format msgid "response %s" -msgstr "" +msgstr "odpověď %s" #: nova/rpc.py:365 #, python-format @@ -179,7 +186,7 @@ msgstr "" #: nova/rpc.py:366 #, python-format msgid "message %s" -msgstr "" +msgstr "zpráva %s" #: nova/service.py:157 #, python-format -- cgit From 129935dfa787c79f32b1e317e360bd05a3126319 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Wed, 9 Feb 2011 05:41:14 +0000 Subject: Launchpad automatic translations update. --- locale/de.po | 2136 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 2136 insertions(+) create mode 100644 locale/de.po diff --git a/locale/de.po b/locale/de.po new file mode 100644 index 000000000..e96292597 --- /dev/null +++ b/locale/de.po @@ -0,0 +1,2136 @@ +# German translation for nova +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the nova package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: 2011-02-08 13:06+0000\n" +"Last-Translator: Christian Berendt \n" +"Language-Team: German \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Launchpad-Export-Date: 2011-02-09 05:41+0000\n" +"X-Generator: Launchpad (build 12177)\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Dateiname der Root CA" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Dateiname des Private Key" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Dateiname der Certificate Revocation List" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Soll eine eigenständige CA für jedes Projekt verwendet werden?" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." 
+ +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" +"%s\n" +"Kommando: %s\n" +"Exit Code: %s\n" +"Stdout: %r\n" +"Stderr: %r" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "Nicht abgefangene Ausnahme" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "(%s) öffentlich (Schlüssel: %s) %s" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "Beziehe von %s: %s" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" +"Der AMQP server %s:%d ist nicht erreichbar. Erneuter Versuch in %d Sekunden." + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." 
+msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out for " +"%d minutes." 
+msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format 
+msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group 
\"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." +msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: 
nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, 
python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, 
python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid 
"check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, 
python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." +msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: 
nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: 
nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: 
nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: nova/objectstore/handler.py:447 +#, 
python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid 
"Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format 
+msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 
+#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, 
python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." 
+msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" -- cgit From 6e881239c9b8a1fb209868addf1a2b83042f2128 Mon Sep 17 00:00:00 2001 From: brian-lamar Date: Wed, 9 Feb 2011 13:30:40 -0500 Subject: 1) Moved tests for limiter to test_common.py (from __init__.py) and expanded test suite to include bad inputs and tests for custom limits (#2) 2) Wrapped int() calls in blocks to ensure logic regardless of input. 
3) Moved 1000 hard limit hard-coding to a keyword param. 4) Added comments as I went. --- nova/api/openstack/common.py | 33 ++++--- nova/tests/api/openstack/__init__.py | 28 ------ nova/tests/api/openstack/test_common.py | 161 ++++++++++++++++++++++++++++++++ 3 files changed, 181 insertions(+), 41 deletions(-) create mode 100644 nova/tests/api/openstack/test_common.py diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 6d2fa16e8..1dc3767e2 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -18,22 +18,29 @@ from nova import exception -def limited(items, req): - """Return a slice of items according to requested offset and limit. - - items - a sliceable - req - wobob.Request possibly containing offset and limit GET variables. - offset is where to start in the list, and limit is the maximum number - of items to return. +def limited(items, request, max_limit=1000): + """ + Return a slice of items according to requested offset and limit. - If limit is not specified, 0, or > 1000, defaults to 1000. + @param items: A sliceable entity + @param request: `webob.Request` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. 
+ @kwarg max_limit: The maximum number of items to return from 'items' """ + try: + offset = int(request.GET.get('offset', 0)) + except ValueError: + offset = 0 + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + limit = max_limit - offset = int(req.GET.get('offset', 0)) - limit = int(req.GET.get('limit', 0)) - if not limit: - limit = 1000 - limit = min(1000, limit) + limit = min(max_limit, limit or max_limit) range_end = offset + limit return items[offset:range_end] diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 14eaaa62c..77b1dd37f 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -92,31 +92,3 @@ class RateLimitingMiddlewareTest(unittest.TestCase): self.assertEqual(middleware.limiter.__class__.__name__, "Limiter") middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar') self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy") - - -class LimiterTest(unittest.TestCase): - - def test_limiter(self): - items = range(2000) - req = Request.blank('/') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=0') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=3') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=2005') - self.assertEqual(limited(items, req), []) - req = Request.blank('/?limit=10') - self.assertEqual(limited(items, req), items[:10]) - req = Request.blank('/?limit=0') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?limit=3000') - self.assertEqual(limited(items, req), items[:1000]) - req = Request.blank('/?offset=1&limit=3') - self.assertEqual(limited(items, req), items[1:4]) - req = Request.blank('/?offset=3&limit=0') - self.assertEqual(limited(items, req), items[3:1003]) - req = Request.blank('/?offset=3&limit=1500') - self.assertEqual(limited(items, req), items[3:1003]) 
- req = Request.blank('/?offset=3000&limit=10') - self.assertEqual(limited(items, req), []) diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py new file mode 100644 index 000000000..9d9837cc9 --- /dev/null +++ b/nova/tests/api/openstack/test_common.py @@ -0,0 +1,161 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test suites for 'common' code used throughout the OpenStack HTTP API. +""" + +import unittest + +from webob import Request + +from nova.api.openstack.common import limited + + +class LimiterTest(unittest.TestCase): + """ + Unit tests for the `nova.api.openstack.common.limited` method which takes + in a list of items and, depending on the 'offset' and 'limit' GET params, + returns a subset or complete set of the given items. + """ + + def setUp(self): + """ + Run before each test. + """ + self.tiny = range(1) + self.small = range(10) + self.medium = range(1000) + self.large = range(10000) + + def test_limiter_offset_zero(self): + """ + Test offset key works with 0. 
+ """ + req = Request.blank('/?offset=0') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_offset_medium(self): + """ + Test offset key works with a medium sized number. + """ + req = Request.blank('/?offset=10') + self.assertEqual(limited(self.tiny, req), []) + self.assertEqual(limited(self.small, req), self.small[10:]) + self.assertEqual(limited(self.medium, req), self.medium[10:]) + self.assertEqual(limited(self.large, req), self.large[10:1010]) + + def test_limiter_offset_over_max(self): + """ + Test offset key works with a number over 1000 (max_limit). + """ + req = Request.blank('/?offset=1001') + self.assertEqual(limited(self.tiny, req), []) + self.assertEqual(limited(self.small, req), []) + self.assertEqual(limited(self.medium, req), []) + self.assertEqual(limited(self.large, req), self.large[1001:2001]) + + def test_limiter_offset_blank(self): + """ + Test offset key works with a blank offset. + """ + req = Request.blank('/?offset=') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_offset_bad(self): + """ + Test offset key works with a BAD offset. 
+ """ + req = Request.blank(u'/?offset=\u0020aa') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_nothing(self): + """ + Test request with no offset or limit + """ + req = Request.blank('/') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_zero(self): + """ + Test limit of zero. + """ + req = Request.blank('/?limit=0') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_medium(self): + """ + Test limit of 10. + """ + req = Request.blank('/?limit=10') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium[:10]) + self.assertEqual(limited(self.large, req), self.large[:10]) + + def test_limiter_limit_over_max(self): + """ + Test limit of 3000. + """ + req = Request.blank('/?limit=3000') + self.assertEqual(limited(self.tiny, req), self.tiny) + self.assertEqual(limited(self.small, req), self.small) + self.assertEqual(limited(self.medium, req), self.medium) + self.assertEqual(limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_and_offset(self): + """ + Test request with both limit and offset. 
+ """ + items = range(2000) + req = Request.blank('/?offset=1&limit=3') + self.assertEqual(limited(items, req), items[1:4]) + req = Request.blank('/?offset=3&limit=0') + self.assertEqual(limited(items, req), items[3:1003]) + req = Request.blank('/?offset=3&limit=1500') + self.assertEqual(limited(items, req), items[3:1003]) + req = Request.blank('/?offset=3000&limit=10') + self.assertEqual(limited(items, req), []) + + def test_limiter_custom_max_limit(self): + """ + Test a max_limit other than 1000. + """ + items = range(2000) + req = Request.blank('/?offset=1&limit=3') + self.assertEqual(limited(items, req, max_limit=2000), items[1:4]) + req = Request.blank('/?offset=3&limit=0') + self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + req = Request.blank('/?offset=3&limit=2500') + self.assertEqual(limited(items, req, max_limit=2000), items[3:]) + req = Request.blank('/?offset=3000&limit=10') + self.assertEqual(limited(items, req, max_limit=2000), []) -- cgit From 590f5f1793c1f829101b4edbacbc79eac7acd2ef Mon Sep 17 00:00:00 2001 From: brian-lamar Date: Wed, 9 Feb 2011 13:36:16 -0500 Subject: Added myself to Authors --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 27782738f..14cc95377 100644 --- a/Authors +++ b/Authors @@ -3,6 +3,7 @@ Anne Gentle Anthony Young Antony Messerli Armando Migliaccio +Brian Lamar Chiradeep Vittal Chmouel Boudjnah Chris Behrens -- cgit From 52e1ad5321590b7b4671349373217bc8fce275fc Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 9 Feb 2011 15:55:29 -0500 Subject: - population of public and private addresses containers in openstack api - replacement of sqlalchemy model in instance stub with dict --- nova/api/openstack/servers.py | 18 +++++++++ nova/tests/api/openstack/test_servers.py | 66 ++++++++++++++++++++++++++++++-- 2 files changed, 81 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 17c5519a1..60f3d96e3 100644 --- 
a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -64,6 +64,24 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], private=[]) + + # grab single private fixed ip + try: + private_ip = inst['fixed_ip']['address'] + if private_ip: + inst_dict['addresses']['private'].append(private_ip) + except KeyError: + LOG.debug(_("Failed to read private ip")) + pass + + # grab all public floating ips + try: + [inst_dict['addresses']['public'].append(floating['address']) \ + for floating in inst['fixed_ip']['floating_ips']] + except KeyError: + LOG.debug(_("Failed to read public ip(s)")) + pass + inst_dict['metadata'] = {} inst_dict['hostId'] = '' diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 724f14f19..816a0ab8c 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -17,6 +17,7 @@ import json import unittest +import datetime import stubout import webob @@ -39,6 +40,13 @@ def return_server(context, id): return stub_instance(id) +def return_server_with_addresses(private, public): + def _return_server(context, id): + return stub_instance(id, private_address=private, + public_addresses=public) + return _return_server + + def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] @@ -55,9 +63,45 @@ def instance_address(context, instance_id): return None -def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id) +def stub_instance(id, user_id=1, private_address=None, public_addresses=None): + if public_addresses == None: + public_addresses = list() + + instance = { + "id": id, + "admin_pass": "", + "user_id": user_id, + "project_id": "", + "image_id": 10, + "kernel_id": "", + "ramdisk_id": "", + "launch_index": 0, + "key_name": "", + "key_data": "", + "state": 0, + 
"state_description": "", + "memory_mb": 0, + "vcpus": 0, + "local_gb": 0, + "hostname": "", + "host": "", + "instance_type": "", + "user_data": "", + "reservation_id": "", + "mac_address": "", + "scheduled_at": datetime.datetime.now(), + "launched_at": datetime.datetime.now(), + "terminated_at": datetime.datetime.now(), + "availability_zone": "", + "display_name": "server%s" % id, + "display_description": "", + "locked": False} + + instance["fixed_ip"] = { + "address": private_address, + "floating_ips": [{"address":ip} for ip in public_addresses]} + + return instance def fake_compute_api(cls, req, id): @@ -105,6 +149,22 @@ class ServersTest(unittest.TestCase): self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['name'], 'server1') + def test_get_server_by_id_with_addresses(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], '1') + self.assertEqual(res_dict['server']['name'], 'server1') + addresses = res_dict['server']['addresses'] + self.assertEqual(len(addresses["public"]), len(public)) + self.assertEqual(addresses["public"][0], public[0]) + self.assertEqual(len(addresses["private"]), 1) + self.assertEqual(addresses["private"][0], private) + def test_get_server_list(self): req = webob.Request.blank('/v1.0/servers') res = req.get_response(fakes.wsgi_app()) -- cgit From 4a4a3f04b78ba2cbaa0d02ecf0f7cd3cf580901b Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 9 Feb 2011 21:54:52 -0500 Subject: adding myself to Authors file --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 27782738f..563ddf759 100644 --- a/Authors +++ b/Authors @@ -3,6 +3,7 @@ Anne Gentle Anthony Young Antony Messerli Armando 
Migliaccio +Brian Waldon Chiradeep Vittal Chmouel Boudjnah Chris Behrens -- cgit From 2e7fd058bf68e3d4c7699a29645423b4f30af812 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Thu, 10 Feb 2011 05:13:45 +0000 Subject: Launchpad automatic translations update. --- locale/de.po | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/locale/de.po b/locale/de.po index e96292597..3b30c2fa9 100644 --- a/locale/de.po +++ b/locale/de.po @@ -8,13 +8,13 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-02-08 13:06+0000\n" +"PO-Revision-Date: 2011-02-09 10:49+0000\n" "Last-Translator: Christian Berendt \n" "Language-Team: German \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-09 05:41+0000\n" +"X-Launchpad-Export-Date: 2011-02-10 05:13+0000\n" "X-Generator: Launchpad (build 12177)\n" #: nova/crypto.py:46 @@ -146,12 +146,12 @@ msgstr "" #: nova/rpc.py:183 #, python-format msgid "no method for message: %s" -msgstr "" +msgstr "keine Methode für diese Nachricht gefunden: %s" #: nova/rpc.py:184 #, python-format msgid "No method for message: %s" -msgstr "" +msgstr "keine Methode für diese Nachricht gefunden: %s" #: nova/rpc.py:245 #, python-format @@ -165,12 +165,12 @@ msgstr "" #: nova/rpc.py:305 msgid "Making asynchronous call..." -msgstr "" +msgstr "führe asynchronen Aufruf durch..." 
#: nova/rpc.py:308 #, python-format msgid "MSG_ID is %s" -msgstr "" +msgstr "MSG_ID ist %s" #: nova/rpc.py:356 #, python-format @@ -180,12 +180,12 @@ msgstr "" #: nova/rpc.py:365 #, python-format msgid "topic is %s" -msgstr "" +msgstr "Betreff ist %s" #: nova/rpc.py:366 #, python-format msgid "message %s" -msgstr "" +msgstr "Nachricht %s" #: nova/service.py:157 #, python-format @@ -212,6 +212,7 @@ msgstr "" #, python-format msgid "Data store %s is unreachable. Trying again in %d seconds." msgstr "" +"Datastore %s ist nicht erreichbar. Versuche es erneut in %d Sekunden." #: nova/service.py:232 nova/twistd.py:232 #, python-format @@ -220,17 +221,17 @@ msgstr "" #: nova/service.py:234 nova/twistd.py:264 msgid "Full set of FLAGS:" -msgstr "" +msgstr "Alle vorhandenen FLAGS:" #: nova/twistd.py:211 #, python-format msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" +msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n" #: nova/twistd.py:268 #, python-format msgid "Starting %s" -msgstr "" +msgstr "%s wird gestartet" #: nova/utils.py:53 #, python-format @@ -240,7 +241,7 @@ msgstr "" #: nova/utils.py:54 #, python-format msgid "Class %s cannot be found" -msgstr "" +msgstr "Klasse %s konnte nicht gefunden werden" #: nova/utils.py:113 #, python-format @@ -250,12 +251,12 @@ msgstr "" #: nova/utils.py:125 #, python-format msgid "Running cmd (subprocess): %s" -msgstr "" +msgstr "Führe Kommando (subprocess) aus: %s" #: nova/utils.py:138 #, python-format msgid "Result was %s" -msgstr "" +msgstr "Ergebnis war %s" #: nova/utils.py:171 #, python-format @@ -2095,22 +2096,22 @@ msgstr "" #: nova/volume/manager.py:93 #, python-format msgid "volume %s: creating" -msgstr "" +msgstr "Volume %s: wird erstellt" #: nova/volume/manager.py:102 #, python-format msgid "volume %s: creating lv of size %sG" -msgstr "" +msgstr "Volume %s: erstelle LV mit %sG" #: nova/volume/manager.py:106 #, python-format msgid "volume %s: creating export" -msgstr "" +msgstr "Volume %s: 
erstelle Export" #: nova/volume/manager.py:113 #, python-format msgid "volume %s: created successfully" -msgstr "" +msgstr "Volume %s: erfolgreich erstellt" #: nova/volume/manager.py:121 msgid "Volume is still attached" @@ -2123,14 +2124,14 @@ msgstr "" #: nova/volume/manager.py:124 #, python-format msgid "volume %s: removing export" -msgstr "" +msgstr "Volume %s: entferne Export" #: nova/volume/manager.py:126 #, python-format msgid "volume %s: deleting" -msgstr "" +msgstr "Volume %s: wird entfernt" #: nova/volume/manager.py:129 #, python-format msgid "volume %s: deleted successfully" -msgstr "" +msgstr "Volume %s: erfolgreich entfernt" -- cgit From 16ffc15b1fb45a09de14cece6b382357a030b9dc Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 10 Feb 2011 08:43:46 -0400 Subject: removed ZoneCommands from nova-manage --- bin/nova-manage | 10 ---------- nova/api/openstack/__init__.py | 2 +- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b62687aec..7835ca551 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -569,15 +569,6 @@ class DbCommands(object): print migration.db_version() -class ZoneCommands(object): - """Methods for defining zones.""" - - def create(self, name): - """Create a new Zone for this deployment.""" - ctxt = context.get_admin_context() - db.create_zone(ctxt, name) - - class VolumeCommands(object): """Methods for dealing with a cloud in an odd state""" @@ -629,7 +620,6 @@ CATEGORIES = [ ('service', ServiceCommands), ('log', LogCommands), ('db', DbCommands), - ('zone', ZoneCommands), ('volume', VolumeCommands)] diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 8901a8987..69a4d66c0 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -80,7 +80,7 @@ class APIRouter(wsgi.Router): server_members["actions"] = "GET" server_members['suspend'] = 'POST' server_members['resume'] = 'POST' - + mapper.resource("zone", "zones", 
controller=zones.Controller(), collection={'detail': 'GET'}, collection_name='zones') -- cgit From 389b548e332a496bcc74d637030f753c66add570 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 10 Feb 2011 16:08:19 -0400 Subject: template adjusted to NOVA_TOOLS, zone db & os api layers added --- nova/api/openstack/zones.py | 75 ++++++++++++++++++++++++++++++++++++++++++++ nova/auth/novarc.template | 7 ++--- nova/db/api.py | 26 +++++++++++++-- nova/db/sqlalchemy/api.py | 41 ++++++++++++++++++++++-- nova/db/sqlalchemy/models.py | 11 ++++++- 5 files changed, 149 insertions(+), 11 deletions(-) create mode 100644 nova/api/openstack/zones.py diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py new file mode 100644 index 000000000..a12d1cc0c --- /dev/null +++ b/nova/api/openstack/zones.py @@ -0,0 +1,75 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging + +from nova import flags +from nova import wsgi +from nova import db + + +FLAGS = flags.FLAGS + + +def _filter_keys(item, keys): + """ + Filters all model attributes except for keys + item is a dict + + """ + return dict((k, v) for k, v in item.iteritems() if k in keys) + + +class Controller(wsgi.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "zone": ["id", "api_url"]}}} + + def index(self, req): + """Return all zones in brief""" + items = db.zone_get_all(req.environ['nova.context']) + items = common.limited(items, req) + items = [_filter_keys(item, ('id', 'api_url')) for item in items] + return dict(zones=items) + + def detail(self, req): + """Return all zones in detail""" + return self.index(req) + + def show(self, req, id): + """Return data about the given zone id""" + zone_id = int(id) + zone = db.zone_get(req.environ['nova.context'], zone_id) + return dict(zone=zone) + + def delete(self, req, id): + zone_id = int(id) + db.zone_delete(req.environ['nova.context'], zone_id) + return {} + + def create(self, req): + context = req.environ['nova.context'] + env = self._deserialize(req.body, req) + zone = db.zone_create(context, env["zone"]) + return dict(zone=zone) + + def update(self, req, id): + context = req.environ['nova.context'] + env = self._deserialize(req.body, req) + zone_id = int(id) + zone = db.zone_update(context, zone_id, env["zone"]) + return dict(zone=zone) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index c53a4acdc..702df3bb0 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -10,7 +10,6 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert 
${NOVA_CERT}" -export CLOUD_SERVERS_API_KEY="%(access)s" -export CLOUD_SERVERS_USERNAME="%(user)s" -export CLOUD_SERVERS_URL="%(os)s" - +export NOVA_TOOLS_API_KEY="%(access)s" +export NOVA_TOOLS_USERNAME="%(user)s" +export NOVA_TOOLS_URL="%(os)s" diff --git a/nova/db/api.py b/nova/db/api.py index dc35f20b2..fa73d86ad 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -985,6 +985,26 @@ def console_get(context, console_id, instance_id=None): #################### -def create_zone(context, name): - """Create a new Zone entry for this deployment.""" - return IMPL.create_zone(context, name) +def zone_create(context, values): + """Create a new ChildZone entry in this Zone.""" + return IMPL.zone_create(context, values) + + +def zone_update(context, zone_id, values): + """Update a ChildZone entry in this Zone.""" + return IMPL.zone_update(context, values) + + +def zone_delete(context, zone_id): + """Delete a ChildZone.""" + return IMPL.zone_delete(context, zone_id) + + +def zone_get(context, zone_id): + """Get a specific ChildZone.""" + return IMPL.zone_get(context, zone_id) + + +def zone_get_all(context): + """Get all ChildZone's.""" + return IMPL.zone_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f568d3470..cdd6db25f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2010,10 +2010,45 @@ def console_get(context, console_id, instance_id=None): return result -################## +#################### + + +@require_admin_context +def zone_create(context, values): + zone = models.ChildZone() + zone.update(values) + zone.save() + return zone + + +@require_admin_context +def zone_update(context, zone_id, values): + zone = session.query(models.ChildZone).filter_by(id=zone_id).first() + if not zone: + raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) + zone.update(values) + zone.save() + return zone + + +@require_admin_context +def zone_delete(context, zone_id): + session = get_session() + with 
session.begin(): + session.execute('delete from childzones ' + 'where id=:id', {'id': zone_id}) + + +@require_admin_context +def zone_get(context, zone_id): + session = get_session() + result = session.query(models.ChildZone).filter_by(id=zone_id).first() + if not result: + raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) + return result @require_admin_context -def create_zone(context, zone): +def zone_get_all(context): session = get_session() - print "Creating Zone", zone + return session.query(models.ChildZone).filter_by(id=zone_id).all() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 7efb36c0e..3c677cad8 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -535,6 +535,15 @@ class Console(BASE, NovaBase): pool = relationship(ConsolePool, backref=backref('consoles')) +class ChildZone(BASE, NovaBase): + """Represents a child zone of this zone.""" + __tablename__ = 'childzones' + id = Column(Integer, primary_key=True) + api_url = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + + def register_models(): """Register Models and create metadata. 
@@ -547,7 +556,7 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console) # , Image, Host + Project, Certificate, ConsolePool, Console, ChildZone) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) -- cgit From b0c6190e0b098af4d808d993c6dcd0796cc80e83 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 10 Feb 2011 14:18:16 -0600 Subject: forgot to add network_get_all_by_instance to db.api --- nova/db/api.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/db/api.py b/nova/db/api.py index f22cd5615..a38f187a8 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -522,6 +522,11 @@ def network_get_by_instance(context, instance_id): return IMPL.network_get_by_instance(context, instance_id) +def network_get_all_by_instance(context, instance_id): + """Get all networks by instance id or raise if it does not exist.""" + return IMPL.network_get_all_by_instance(context, instance_id) + + def network_get_index(context, network_id): """Get non-conflicting index for network.""" return IMPL.network_get_index(context, network_id) -- cgit From 87d0b5203610f1e0a7a2e09033c79071fabacaba Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 10 Feb 2011 15:01:31 -0600 Subject: passing instance to reset_network instead of vm_ref, also not converting to an opaque ref before making plugin call --- nova/virt/xenapi/vmops.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 4056e99bc..575e53f80 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -121,7 +121,7 @@ class VMOps(object): network_ref, instance.mac_address) # call reset networking - self.reset_network(vm_ref) + self.reset_network(instance) LOG.debug(_('Starting VM %s...'), vm_ref) 
self._session.call_xenapi('VM.start', vm_ref, False, False) @@ -393,9 +393,8 @@ class VMOps(object): return 'http://fakeajaxconsole/fake_url' def reset_network(self, instance): - vm = self._get_vm_opaque_ref(instance) args = {'id': str(uuid.uuid4())} - resp = self._make_agent_call('resetnetwork', vm, '', args) + resp = self._make_agent_call('resetnetwork', instance, '', args) def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records -- cgit From 96640472934c4eba48c6ab0048ac5bcf3c192eb4 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 10 Feb 2011 15:25:26 -0600 Subject: added resetnetwork to the XenAPIPlugin.dispatch dict --- plugins/xenserver/xenapi/etc/xapi.d/plugins/agent | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent index 5c5ec7c45..b4c742396 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -135,4 +135,5 @@ def _wait_for_agent(self, request_id, arg_dict): if __name__ == "__main__": XenAPIPlugin.dispatch( {"key_init": key_init, - "password": password}) + "password": password, + "resetnetwork": resetnetwork}) -- cgit From 57e58ba23c5c6a1af0f132385d3d9b9cc370b47d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 10 Feb 2011 16:26:08 -0600 Subject: added get IPs by instance --- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 11 +++++++++++ 2 files changed, 16 insertions(+) diff --git a/nova/db/api.py b/nova/db/api.py index a38f187a8..a2c1dbdce 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -294,6 +294,11 @@ def fixed_ip_get_by_address(context, address): return IMPL.fixed_ip_get_by_address(context, address) +def fixed_ip_get_all_by_instance(context, instance_id): + """Get fixed ips by instance or raise if none exist.""" + return IMPL.fixed_ip_get_all_by_instance(context, instance_id) + + def 
fixed_ip_get_instance(context, address): """Get an instance for a fixed ip by address.""" return IMPL.fixed_ip_get_instance(context, address) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 26b685e43..f20f4e266 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -606,6 +606,17 @@ def fixed_ip_get_instance(context, address): return fixed_ip_ref.instance +@require_context +def fixed_ip_get_all_by_instance(context, instance_id): + session = get_session() + rv = session.query(models.Network.fixed_ips).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False) + if not rv: + raise exception.NotFound(_('No address for instance %s') % instance_id) + return rv + + @require_context def fixed_ip_get_instance_v6(context, address): session = get_session() -- cgit From c230dba962a3db2a3a8bb502dfb33313f0ef274b Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 11 Feb 2011 11:25:55 -0400 Subject: rough cut at zone api tests --- nova/tests/api/openstack/test_zones.py | 74 ++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 nova/tests/api/openstack/test_zones.py diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py new file mode 100644 index 000000000..8a817bebe --- /dev/null +++ b/nova/tests/api/openstack/test_zones.py @@ -0,0 +1,74 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import unittest + +import stubout +import webob + +import nova.api +from nova.api.openstack import zones +from nova.tests.api.openstack import fakes + + +class ZonesTest(unittest.TestCase): + def setUp(self): + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + + def tearDown(self): + self.stubs.UnsetAll() + + def test_get_zone_list(self): + req = webob.Request.blank('/v1.0/zones') + res = req.get_response(fakes.wsgi_app()) + + def test_get_zone_by_id(self): + req = webob.Request.blank('/v1.0/zones/1') + res = req.get_response(fakes.wsgi_app()) + + def test_zone_delete(self): + req = webob.Request.blank('/v1.0/zones/1') + res = req.get_response(fakes.wsgi_app()) + + def test_zone_create(self): + body = dict(server=dict(api_url='http://blah.zoo', username='bob', + password='qwerty')) + req = webob.Request.blank('/v1.0/zones') + req.method = 'POST' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + + def test_zone_update(self): + body = dict(server=dict(api_url='http://blah.zoo', username='zeb', + password='sneaky')) + req = webob.Request.blank('/v1.0/zones/1') + req.method = 'PUT' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + + +if __name__ == '__main__': + unittest.main() -- cgit From df9bf23ecda1f32fd31ebffc6013e2f60f7fd3fa Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Fri, 11 Feb 2011 15:13:05 -0400 Subject: zone api tests passing --- nova/api/openstack/__init__.py | 3 +- nova/api/openstack/zones.py | 1 + nova/tests/api/openstack/test_zones.py | 81 +++++++++++++++++++++++++++++++--- 3 files changed, 78 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 8aeb69693..33d040ab3 
100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -82,8 +82,7 @@ class APIRouter(wsgi.Router): server_members['resume'] = 'POST' mapper.resource("zone", "zones", controller=zones.Controller(), - collection={'detail': 'GET'}, - collection_name='zones') + collection={'detail': 'GET'}) mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index a12d1cc0c..e84b38fa9 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import common import logging from nova import flags diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 8a817bebe..8dbdffa41 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -17,12 +17,50 @@ import unittest import stubout import webob +import json -import nova.api +import nova.db +from nova import context +from nova import flags from nova.api.openstack import zones from nova.tests.api.openstack import fakes +FLAGS = flags.FLAGS +FLAGS.verbose = True + + +def zone_get(context, zone_id): + return dict(id=1, api_url='http://foo.com', username='bob', + password='xxx') + + +def zone_create(context, values): + zone = dict(id=1) + zone.update(values) + return zone + + +def zone_update(context, zone_id, values): + zone = dict(id=zone_id, api_url='http://foo.com', username='bob', + password='xxx') + zone.update(values) + return zone + + +def zone_delete(context, zone_id): + pass + + +def zone_get_all(context): + return [ + dict(id=1, api_url='http://foo.com', username='bob', + password='xxx'), + dict(id=2, api_url='http://blah.com', username='alice', + password='qwerty') + ] + + class ZonesTest(unittest.TestCase): def setUp(self): self.stubs = stubout.StubOutForTesting() @@ -32,42 
+70,75 @@ class ZonesTest(unittest.TestCase): fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) + self.allow_admin = FLAGS.allow_admin_api + FLAGS.allow_admin_api = True + + self.stubs.Set(nova.db, 'zone_get', zone_get) + self.stubs.Set(nova.db, 'zone_get_all', zone_get_all) + self.stubs.Set(nova.db, 'zone_update', zone_update) + self.stubs.Set(nova.db, 'zone_create', zone_create) + self.stubs.Set(nova.db, 'zone_delete', zone_delete) + def tearDown(self): self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin def test_get_zone_list(self): req = webob.Request.blank('/v1.0/zones') res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['zones']), 2) def test_get_zone_by_id(self): req = webob.Request.blank('/v1.0/zones/1') res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') + self.assertEqual(res_dict['zone']['username'], 'bob') + self.assertEqual(res_dict['zone']['password'], 'xxx') + + self.assertEqual(res.status_int, 200) def test_zone_delete(self): req = webob.Request.blank('/v1.0/zones/1') res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + def test_zone_create(self): - body = dict(server=dict(api_url='http://blah.zoo', username='bob', - password='qwerty')) + body = dict(zone=dict(api_url='http://blah.zoo', username='fred', + password='fubar')) req = webob.Request.blank('/v1.0/zones') req.method = 'POST' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo') + self.assertEqual(res_dict['zone']['username'], 'fred') + self.assertEqual(res_dict['zone']['password'], 'fubar') def 
test_zone_update(self): - body = dict(server=dict(api_url='http://blah.zoo', username='zeb', - password='sneaky')) + body = dict(zone=dict(username='zeb', password='sneaky')) req = webob.Request.blank('/v1.0/zones/1') req.method = 'PUT' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['zone']['id'], 1) + self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') + self.assertEqual(res_dict['zone']['username'], 'zeb') + self.assertEqual(res_dict['zone']['password'], 'sneaky') if __name__ == '__main__': -- cgit From ae70e05c0dd0e703da0826e4d7087cef3283af56 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Sat, 12 Feb 2011 05:37:22 +0000 Subject: Launchpad automatic translations update. --- locale/zh_CN.po | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/locale/zh_CN.po b/locale/zh_CN.po index 6bc231e50..bc82115ae 100644 --- a/locale/zh_CN.po +++ b/locale/zh_CN.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-22 03:11+0000\n" -"Last-Translator: combo \n" +"PO-Revision-Date: 2011-02-12 05:05+0000\n" +"Last-Translator: Winston Dillon \n" "Language-Team: Chinese (Simplified) \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-02-12 05:37+0000\n" +"X-Generator: Launchpad (build 12351)\n" #: nova/twistd.py:268 #, python-format @@ -315,7 +315,7 @@ msgstr "键为: %s\t\t值为: %s" #: nova/api/ec2/__init__.py:301 #, python-format msgid "Unauthorized request for controller=%s and action=%s" -msgstr "对于控制器=%s和执行=%s的请求,未审核" +msgstr "对控制器=%s及动作=%s未经授权" #: nova/api/ec2/__init__.py:339 #, 
python-format @@ -330,7 +330,7 @@ msgstr "引发了Api错误: %s" #: nova/api/ec2/__init__.py:349 #, python-format msgid "Unexpected error raised: %s" -msgstr "引发了未知的错误: %s" +msgstr "引发了意外的错误:%s" #: nova/api/ec2/__init__.py:354 msgid "An unknown error has occurred. Please try your request again." @@ -349,7 +349,7 @@ msgstr "删除用户: %s" #: nova/api/ec2/admin.py:114 #, python-format msgid "Adding role %s to user %s for project %s" -msgstr "增加角色 %s给用户 %s,在工程 %s中" +msgstr "正将%s角色赋予用户%s(在工程%s中)" #: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 #, python-format @@ -359,12 +359,12 @@ msgstr "增加站点范围的 %s角色给用户 %s" #: nova/api/ec2/admin.py:122 #, python-format msgid "Removing role %s from user %s for project %s" -msgstr "移除角色 %s从用户 %s中,在工程 %s" +msgstr "正将角色%s从用户%s在工程%s中移除" #: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 #, python-format msgid "Removing sitewide role %s from user %s" -msgstr "移除站点范围的 %s角色从用户 %s中" +msgstr "" #: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 msgid "operation must be add or remove" @@ -393,7 +393,7 @@ msgstr "增加用户%s到%s工程" #: nova/api/ec2/admin.py:188 #, python-format msgid "Removing user %s from project %s" -msgstr "移除用户%s从工程%s中" +msgstr "正将用户%s从工程%s中移除" #: nova/api/ec2/apirequest.py:95 #, python-format @@ -403,7 +403,7 @@ msgstr "不支持的API请求: 控制器 = %s,执行 = %s" #: nova/api/ec2/cloud.py:117 #, python-format msgid "Generating root CA: %s" -msgstr "" +msgstr "生成根证书: %s" #: nova/api/ec2/cloud.py:277 #, python-format -- cgit From b554867a3ff9dd67bb528c0731f14b6730a28cf4 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Sun, 13 Feb 2011 05:09:17 +0000 Subject: Launchpad automatic translations update. 
--- locale/zh_CN.po | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locale/zh_CN.po b/locale/zh_CN.po index bc82115ae..64e051a62 100644 --- a/locale/zh_CN.po +++ b/locale/zh_CN.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-12 05:37+0000\n" +"X-Launchpad-Export-Date: 2011-02-13 05:09+0000\n" "X-Generator: Launchpad (build 12351)\n" #: nova/twistd.py:268 -- cgit From cad2e12da52c235f2b97a17a9151296861830901 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Mon, 14 Feb 2011 05:22:52 +0000 Subject: Launchpad automatic translations update. --- locale/zh_CN.po | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/locale/zh_CN.po b/locale/zh_CN.po index 64e051a62..01b8dc378 100644 --- a/locale/zh_CN.po +++ b/locale/zh_CN.po @@ -8,13 +8,13 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-02-12 05:05+0000\n" +"PO-Revision-Date: 2011-02-14 02:26+0000\n" "Last-Translator: Winston Dillon \n" "Language-Team: Chinese (Simplified) \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-13 05:09+0000\n" +"X-Launchpad-Export-Date: 2011-02-14 05:22+0000\n" "X-Generator: Launchpad (build 12351)\n" #: nova/twistd.py:268 @@ -413,50 +413,50 @@ msgstr "创建键值对 %s" #: nova/api/ec2/cloud.py:285 #, python-format msgid "Delete key pair %s" -msgstr "" +msgstr "删除键值对 %s" #: nova/api/ec2/cloud.py:357 #, python-format msgid "%s is not a valid ipProtocol" -msgstr "" +msgstr "%s是无效的IP协议" #: nova/api/ec2/cloud.py:361 msgid "Invalid port range" -msgstr "" +msgstr "端口范围无效" #: nova/api/ec2/cloud.py:392 #, python-format msgid "Revoke security group ingress %s" -msgstr "" +msgstr "撤销输入安全组 %s" #: 
nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 msgid "No rule for the specified parameters." -msgstr "" +msgstr "对给定的参数无特定规则。" #: nova/api/ec2/cloud.py:421 #, python-format msgid "Authorize security group ingress %s" -msgstr "" +msgstr "验证输入安全组 %s" #: nova/api/ec2/cloud.py:432 #, python-format msgid "This rule already exists in group %s" -msgstr "" +msgstr "这条规则已经存在安全组%s中。" #: nova/api/ec2/cloud.py:460 #, python-format msgid "Create Security Group %s" -msgstr "" +msgstr "创建安全组%s" #: nova/api/ec2/cloud.py:463 #, python-format msgid "group %s already exists" -msgstr "" +msgstr "安全组%s已经存在" #: nova/api/ec2/cloud.py:475 #, python-format msgid "Delete security group %s" -msgstr "" +msgstr "删除安全组 %s" #: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 #, python-format -- cgit From 8001f334221a16d8328289f6954ef549844f76f3 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Mon, 14 Feb 2011 14:26:32 +0100 Subject: Fix DescribeRegion answer by using specific 'listen' configuration parameter instead of overloading ec2_host --- bin/nova-api | 4 ++-- bin/nova-combined | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 11176a021..eb59d0191 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -59,12 +59,12 @@ def run_app(paste_config_file): LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) wsgi.paste_config_to_flags(config, { "verbose": FLAGS.verbose, - "%s_host" % api: config.get('host', '0.0.0.0'), + "%s_host" % api: getattr(FLAGS, "%s_host" % api), "%s_port" % api: getattr(FLAGS, "%s_port" % api)}) LOG.info(_("Running %s API"), api) app = wsgi.load_paste_app(paste_config_file, api) apps.append((app, getattr(FLAGS, "%s_port" % api), - getattr(FLAGS, "%s_host" % api))) + config.get('listen', '0.0.0.0'))) if len(apps) == 0: LOG.error(_("No known API applications configured in %s."), paste_config_file) diff --git a/bin/nova-combined b/bin/nova-combined index 913c866bf..889600eb7 100755 --- a/bin/nova-combined +++ 
b/bin/nova-combined @@ -67,11 +67,11 @@ if __name__ == '__main__': continue wsgi.paste_config_to_flags(config, { "verbose": FLAGS.verbose, - "%s_host" % api: config.get('host', '0.0.0.0'), + "%s_host" % api: getattr(FLAGS, "%s_host" % api), "%s_port" % api: getattr(FLAGS, "%s_port" % api)}) app = wsgi.load_paste_app(paste_config_file, api) apps.append((app, getattr(FLAGS, "%s_port" % api), - getattr(FLAGS, "%s_host" % api))) + config.get('listen', '0.0.0.0'))) if len(apps) > 0: logging.basicConfig() server = wsgi.Server() -- cgit From 3e412a5f34c6dae44d8f4d6bce030fb267aa5aea Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 14 Feb 2011 11:04:45 -0500 Subject: Merge Distutils.Extra changes for automating translation message catalog compilation --- babel.cfg | 2 - locale/nova.pot | 2130 ------------------------------------------- po/nova.pot | 2705 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ setup.py | 12 +- 4 files changed, 2715 insertions(+), 2134 deletions(-) delete mode 100644 babel.cfg delete mode 100644 locale/nova.pot create mode 100644 po/nova.pot diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb76..000000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/locale/nova.pot b/locale/nova.pot deleted file mode 100644 index a96411e33..000000000 --- a/locale/nova.pot +++ /dev/null @@ -1,2130 +0,0 @@ -# Translations template for nova. -# Copyright (C) 2011 ORGANIZATION -# This file is distributed under the same license as the nova project. -# FIRST AUTHOR , 2011. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: nova 2011.1\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME \n" -"Language-Team: LANGUAGE \n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.4\n" - -#: nova/crypto.py:46 -msgid "Filename of root CA" -msgstr "" - -#: nova/crypto.py:49 -msgid "Filename of private key" -msgstr "" - -#: nova/crypto.py:51 -msgid "Filename of root Certificate Revokation List" -msgstr "" - -#: nova/crypto.py:53 -msgid "Where we keep our keys" -msgstr "" - -#: nova/crypto.py:55 -msgid "Where we keep our root CA" -msgstr "" - -#: nova/crypto.py:57 -msgid "Should we use a CA for each project?" -msgstr "" - -#: nova/crypto.py:61 -#, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" -msgstr "" - -#: nova/crypto.py:66 -#, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "" - -#: nova/crypto.py:71 -#, python-format -msgid "Subject for certificate for vpns, %s for project, timestamp" -msgstr "" - -#: nova/crypto.py:258 -#, python-format -msgid "Flags path: %s" -msgstr "" - -#: nova/exception.py:33 -msgid "Unexpected error while running command." 
-msgstr "" - -#: nova/exception.py:36 -#, python-format -msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "" - -#: nova/fakerabbit.py:48 -#, python-format -msgid "(%s) publish (key: %s) %s" -msgstr "" - -#: nova/fakerabbit.py:53 -#, python-format -msgid "Publishing to route %s" -msgstr "" - -#: nova/fakerabbit.py:83 -#, python-format -msgid "Declaring queue %s" -msgstr "" - -#: nova/fakerabbit.py:89 -#, python-format -msgid "Declaring exchange %s" -msgstr "" - -#: nova/fakerabbit.py:95 -#, python-format -msgid "Binding %s to %s with key %s" -msgstr "" - -#: nova/fakerabbit.py:120 -#, python-format -msgid "Getting from %s: %s" -msgstr "" - -#: nova/rpc.py:92 -#, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -msgstr "" - -#: nova/rpc.py:99 -#, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." -msgstr "" - -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "" - -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "" - -#: nova/rpc.py:155 -#, python-format -msgid "Initing the Adapter Consumer for %s" -msgstr "" - -#: nova/rpc.py:170 -#, python-format -msgid "received %s" -msgstr "" - -#: nova/rpc.py:183 -#, python-format -msgid "no method for message: %s" -msgstr "" - -#: nova/rpc.py:184 -#, python-format -msgid "No method for message: %s" -msgstr "" - -#: nova/rpc.py:245 -#, python-format -msgid "Returning exception %s to caller" -msgstr "" - -#: nova/rpc.py:286 -#, python-format -msgid "unpacked context: %s" -msgstr "" - -#: nova/rpc.py:305 -msgid "Making asynchronous call..." 
-msgstr "" - -#: nova/rpc.py:308 -#, python-format -msgid "MSG_ID is %s" -msgstr "" - -#: nova/rpc.py:356 -#, python-format -msgid "response %s" -msgstr "" - -#: nova/rpc.py:365 -#, python-format -msgid "topic is %s" -msgstr "" - -#: nova/rpc.py:366 -#, python-format -msgid "message %s" -msgstr "" - -#: nova/service.py:157 -#, python-format -msgid "Starting %s node" -msgstr "" - -#: nova/service.py:169 -msgid "Service killed that has no database entry" -msgstr "" - -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." -msgstr "" - -#: nova/service.py:202 -msgid "Recovered model server connection!" -msgstr "" - -#: nova/service.py:208 -msgid "model server went away" -msgstr "" - -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 -#, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." -msgstr "" - -#: nova/service.py:232 nova/twistd.py:232 -#, python-format -msgid "Serving %s" -msgstr "" - -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" -msgstr "" - -#: nova/twistd.py:211 -#, python-format -msgid "pidfile %s does not exist. 
Daemon not running?\n" -msgstr "" - -#: nova/twistd.py:268 -#, python-format -msgid "Starting %s" -msgstr "" - -#: nova/utils.py:53 -#, python-format -msgid "Inner Exception: %s" -msgstr "" - -#: nova/utils.py:54 -#, python-format -msgid "Class %s cannot be found" -msgstr "" - -#: nova/utils.py:113 -#, python-format -msgid "Fetching %s" -msgstr "" - -#: nova/utils.py:125 -#, python-format -msgid "Running cmd (subprocess): %s" -msgstr "" - -#: nova/utils.py:138 -#, python-format -msgid "Result was %s" -msgstr "" - -#: nova/utils.py:171 -#, python-format -msgid "debug in callback: %s" -msgstr "" - -#: nova/utils.py:176 -#, python-format -msgid "Running %s" -msgstr "" - -#: nova/utils.py:207 -#, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" -msgstr "" - -#: nova/utils.py:289 -#, python-format -msgid "Invalid backend: %s" -msgstr "" - -#: nova/utils.py:300 -#, python-format -msgid "backend %s" -msgstr "" - -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." -msgstr "" - -#: nova/api/ec2/__init__.py:142 -#, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out " -"for %d minutes." 
-msgstr "" - -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 -#, python-format -msgid "Authentication Failure: %s" -msgstr "" - -#: nova/api/ec2/__init__.py:190 -#, python-format -msgid "Authenticated Request For %s:%s)" -msgstr "" - -#: nova/api/ec2/__init__.py:227 -#, python-format -msgid "action: %s" -msgstr "" - -#: nova/api/ec2/__init__.py:229 -#, python-format -msgid "arg: %s\t\tval: %s" -msgstr "" - -#: nova/api/ec2/__init__.py:301 -#, python-format -msgid "Unauthorized request for controller=%s and action=%s" -msgstr "" - -#: nova/api/ec2/__init__.py:339 -#, python-format -msgid "NotFound raised: %s" -msgstr "" - -#: nova/api/ec2/__init__.py:342 -#, python-format -msgid "ApiError raised: %s" -msgstr "" - -#: nova/api/ec2/__init__.py:349 -#, python-format -msgid "Unexpected error raised: %s" -msgstr "" - -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." -msgstr "" - -#: nova/api/ec2/admin.py:84 -#, python-format -msgid "Creating new user: %s" -msgstr "" - -#: nova/api/ec2/admin.py:92 -#, python-format -msgid "Deleting user: %s" -msgstr "" - -#: nova/api/ec2/admin.py:114 -#, python-format -msgid "Adding role %s to user %s for project %s" -msgstr "" - -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 -#, python-format -msgid "Adding sitewide role %s to user %s" -msgstr "" - -#: nova/api/ec2/admin.py:122 -#, python-format -msgid "Removing role %s from user %s for project %s" -msgstr "" - -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 -#, python-format -msgid "Removing sitewide role %s from user %s" -msgstr "" - -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" -msgstr "" - -#: nova/api/ec2/admin.py:142 -#, python-format -msgid "Getting x509 for user: %s on project: %s" -msgstr "" - -#: nova/api/ec2/admin.py:159 -#, python-format -msgid "Create project %s managed by %s" -msgstr "" - -#: nova/api/ec2/admin.py:170 -#, python-format 
-msgid "Delete project: %s" -msgstr "" - -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 -#, python-format -msgid "Adding user %s to project %s" -msgstr "" - -#: nova/api/ec2/admin.py:188 -#, python-format -msgid "Removing user %s from project %s" -msgstr "" - -#: nova/api/ec2/apirequest.py:95 -#, python-format -msgid "Unsupported API request: controller = %s,action = %s" -msgstr "" - -#: nova/api/ec2/cloud.py:117 -#, python-format -msgid "Generating root CA: %s" -msgstr "" - -#: nova/api/ec2/cloud.py:277 -#, python-format -msgid "Create key pair %s" -msgstr "" - -#: nova/api/ec2/cloud.py:285 -#, python-format -msgid "Delete key pair %s" -msgstr "" - -#: nova/api/ec2/cloud.py:357 -#, python-format -msgid "%s is not a valid ipProtocol" -msgstr "" - -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" -msgstr "" - -#: nova/api/ec2/cloud.py:392 -#, python-format -msgid "Revoke security group ingress %s" -msgstr "" - -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." 
-msgstr "" - -#: nova/api/ec2/cloud.py:421 -#, python-format -msgid "Authorize security group ingress %s" -msgstr "" - -#: nova/api/ec2/cloud.py:432 -#, python-format -msgid "This rule already exists in group %s" -msgstr "" - -#: nova/api/ec2/cloud.py:460 -#, python-format -msgid "Create Security Group %s" -msgstr "" - -#: nova/api/ec2/cloud.py:463 -#, python-format -msgid "group %s already exists" -msgstr "" - -#: nova/api/ec2/cloud.py:475 -#, python-format -msgid "Delete security group %s" -msgstr "" - -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 -#, python-format -msgid "Get console output for instance %s" -msgstr "" - -#: nova/api/ec2/cloud.py:543 -#, python-format -msgid "Create volume of %s GB" -msgstr "" - -#: nova/api/ec2/cloud.py:567 -#, python-format -msgid "Attach volume %s to instacne %s at %s" -msgstr "" - -#: nova/api/ec2/cloud.py:579 -#, python-format -msgid "Detach volume %s" -msgstr "" - -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" -msgstr "" - -#: nova/api/ec2/cloud.py:691 -#, python-format -msgid "Release address %s" -msgstr "" - -#: nova/api/ec2/cloud.py:696 -#, python-format -msgid "Associate address %s to instance %s" -msgstr "" - -#: nova/api/ec2/cloud.py:703 -#, python-format -msgid "Disassociate address %s" -msgstr "" - -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" -msgstr "" - -#: nova/api/ec2/cloud.py:738 -#, python-format -msgid "Reboot instance %r" -msgstr "" - -#: nova/api/ec2/cloud.py:775 -#, python-format -msgid "De-registering image %s" -msgstr "" - -#: nova/api/ec2/cloud.py:783 -#, python-format -msgid "Registered image %s with id %s" -msgstr "" - -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 -#, python-format -msgid "attribute not supported: %s" -msgstr "" - -#: nova/api/ec2/cloud.py:794 -#, python-format -msgid "invalid id: %s" -msgstr "" - -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" -msgstr "" - -#: nova/api/ec2/cloud.py:809 -msgid "only group 
\"all\" is supported" -msgstr "" - -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" -msgstr "" - -#: nova/api/ec2/cloud.py:812 -#, python-format -msgid "Updating image %s publicity" -msgstr "" - -#: nova/api/ec2/metadatarequesthandler.py:75 -#, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "" - -#: nova/api/openstack/__init__.py:70 -#, python-format -msgid "Caught error: %s" -msgstr "" - -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." -msgstr "" - -#: nova/api/openstack/servers.py:184 -#, python-format -msgid "Compute.api::lock %s" -msgstr "" - -#: nova/api/openstack/servers.py:199 -#, python-format -msgid "Compute.api::unlock %s" -msgstr "" - -#: nova/api/openstack/servers.py:213 -#, python-format -msgid "Compute.api::get_lock %s" -msgstr "" - -#: nova/api/openstack/servers.py:224 -#, python-format -msgid "Compute.api::pause %s" -msgstr "" - -#: nova/api/openstack/servers.py:235 -#, python-format -msgid "Compute.api::unpause %s" -msgstr "" - -#: nova/api/openstack/servers.py:246 -#, python-format -msgid "compute.api::suspend %s" -msgstr "" - -#: nova/api/openstack/servers.py:257 -#, python-format -msgid "compute.api::resume %s" -msgstr "" - -#: nova/auth/dbdriver.py:84 -#, python-format -msgid "User %s already exists" -msgstr "" - -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 -#, python-format -msgid "Project can't be created because manager %s doesn't exist" -msgstr "" - -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 -#, python-format -msgid "Project can't be created because project %s already exists" -msgstr "" - -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 -#, python-format -msgid "Project can't be modified because manager %s doesn't exist" -msgstr "" - -#: nova/auth/dbdriver.py:245 -#, python-format -msgid "User \"%s\" not found" -msgstr "" - -#: nova/auth/dbdriver.py:248 -#, python-format -msgid "Project \"%s\" not found" -msgstr "" - -#: 
nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "" - -#: nova/auth/ldapdriver.py:181 -#, python-format -msgid "LDAP object for %s doesn't exist" -msgstr "" - -#: nova/auth/ldapdriver.py:218 -#, python-format -msgid "Project can't be created because user %s doesn't exist" -msgstr "" - -#: nova/auth/ldapdriver.py:478 -#, python-format -msgid "User %s is already a member of the group %s" -msgstr "" - -#: nova/auth/ldapdriver.py:507 -#, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." -msgstr "" - -#: nova/auth/ldapdriver.py:528 -#, python-format -msgid "Group at dn %s doesn't exist" -msgstr "" - -#: nova/auth/manager.py:259 -#, python-format -msgid "Looking up user: %r" -msgstr "" - -#: nova/auth/manager.py:263 -#, python-format -msgid "Failed authorization for access key %s" -msgstr "" - -#: nova/auth/manager.py:264 -#, python-format -msgid "No user found for access key %s" -msgstr "" - -#: nova/auth/manager.py:270 -#, python-format -msgid "Using project name = user name (%s)" -msgstr "" - -#: nova/auth/manager.py:275 -#, python-format -msgid "failed authorization: no project named %s (user=%s)" -msgstr "" - -#: nova/auth/manager.py:277 -#, python-format -msgid "No project called %s could be found" -msgstr "" - -#: nova/auth/manager.py:281 -#, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" -msgstr "" - -#: nova/auth/manager.py:283 -#, python-format -msgid "User %s is not a member of project %s" -msgstr "" - -#: nova/auth/manager.py:292 nova/auth/manager.py:303 -#, python-format -msgid "Invalid signature for user %s" -msgstr "" - -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" -msgstr "" - -#: nova/auth/manager.py:374 -msgid "Must specify project" -msgstr "" - -#: nova/auth/manager.py:408 -#, python-format -msgid "The %s role can not be found" -msgstr "" - -#: nova/auth/manager.py:410 -#, 
python-format -msgid "The %s role is global only" -msgstr "" - -#: nova/auth/manager.py:412 -#, python-format -msgid "Adding role %s to user %s in project %s" -msgstr "" - -#: nova/auth/manager.py:438 -#, python-format -msgid "Removing role %s from user %s on project %s" -msgstr "" - -#: nova/auth/manager.py:505 -#, python-format -msgid "Created project %s with manager %s" -msgstr "" - -#: nova/auth/manager.py:523 -#, python-format -msgid "modifying project %s" -msgstr "" - -#: nova/auth/manager.py:553 -#, python-format -msgid "Remove user %s from project %s" -msgstr "" - -#: nova/auth/manager.py:581 -#, python-format -msgid "Deleting project %s" -msgstr "" - -#: nova/auth/manager.py:637 -#, python-format -msgid "Created user %s (admin: %r)" -msgstr "" - -#: nova/auth/manager.py:645 -#, python-format -msgid "Deleting user %s" -msgstr "" - -#: nova/auth/manager.py:655 -#, python-format -msgid "Access Key change for user %s" -msgstr "" - -#: nova/auth/manager.py:657 -#, python-format -msgid "Secret Key change for user %s" -msgstr "" - -#: nova/auth/manager.py:659 -#, python-format -msgid "Admin status set to %r for user %s" -msgstr "" - -#: nova/auth/manager.py:708 -#, python-format -msgid "No vpn data for project %s" -msgstr "" - -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" -msgstr "" - -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" -msgstr "" - -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" -msgstr "" - -#: nova/cloudpipe/pipelib.py:97 -#, python-format -msgid "Launching VPN for %s" -msgstr "" - -#: nova/compute/api.py:67 -#, python-format -msgid "Instance %d was not found in get_network_topic" -msgstr "" - -#: nova/compute/api.py:73 -#, python-format -msgid "Instance %d has no host" -msgstr "" - -#: nova/compute/api.py:92 -#, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" -msgstr "" - -#: nova/compute/api.py:94 -#, 
python-format -msgid "Instance quota exceeded. You can only run %s more instances of this type." -msgstr "" - -#: nova/compute/api.py:109 -msgid "Creating a raw instance" -msgstr "" - -#: nova/compute/api.py:156 -#, python-format -msgid "Going to run %s instances..." -msgstr "" - -#: nova/compute/api.py:180 -#, python-format -msgid "Casting to scheduler for %s/%s's instance %s" -msgstr "" - -#: nova/compute/api.py:279 -#, python-format -msgid "Going to try and terminate %s" -msgstr "" - -#: nova/compute/api.py:283 -#, python-format -msgid "Instance %d was not found during terminate" -msgstr "" - -#: nova/compute/api.py:288 -#, python-format -msgid "Instance %d is already being terminated" -msgstr "" - -#: nova/compute/api.py:450 -#, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" -msgstr "" - -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" -msgstr "" - -#: nova/compute/disk.py:71 -#, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" -msgstr "" - -#: nova/compute/disk.py:75 -#, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" -msgstr "" - -#: nova/compute/disk.py:128 -#, python-format -msgid "Could not attach image to loopback: %s" -msgstr "" - -#: nova/compute/disk.py:136 -#, python-format -msgid "Failed to load partition: %s" -msgstr "" - -#: nova/compute/disk.py:158 -#, python-format -msgid "Failed to mount filesystem: %s" -msgstr "" - -#: nova/compute/instance_types.py:41 -#, python-format -msgid "Unknown instance type: %s" -msgstr "" - -#: nova/compute/manager.py:69 -#, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "" - -#: nova/compute/manager.py:71 -#, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" -msgstr "" - -#: nova/compute/manager.py:75 -#, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "" - -#: nova/compute/manager.py:77 -#, python-format -msgid 
"check_instance_lock: admin: |%s|" -msgstr "" - -#: nova/compute/manager.py:82 -#, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "" - -#: nova/compute/manager.py:86 -#, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "" - -#: nova/compute/manager.py:157 -msgid "Instance has already been created" -msgstr "" - -#: nova/compute/manager.py:158 -#, python-format -msgid "instance %s: starting..." -msgstr "" - -#: nova/compute/manager.py:197 -#, python-format -msgid "instance %s: Failed to spawn" -msgstr "" - -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 -#, python-format -msgid "Terminating instance %s" -msgstr "" - -#: nova/compute/manager.py:217 -#, python-format -msgid "Disassociating address %s" -msgstr "" - -#: nova/compute/manager.py:230 -#, python-format -msgid "Deallocating address %s" -msgstr "" - -#: nova/compute/manager.py:243 -#, python-format -msgid "trying to destroy already destroyed instance: %s" -msgstr "" - -#: nova/compute/manager.py:257 -#, python-format -msgid "Rebooting instance %s" -msgstr "" - -#: nova/compute/manager.py:260 -#, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" -msgstr "" - -#: nova/compute/manager.py:286 -#, python-format -msgid "instance %s: snapshotting" -msgstr "" - -#: nova/compute/manager.py:289 -#, python-format -msgid "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" -msgstr "" - -#: nova/compute/manager.py:301 -#, python-format -msgid "instance %s: rescuing" -msgstr "" - -#: nova/compute/manager.py:316 -#, python-format -msgid "instance %s: unrescuing" -msgstr "" - -#: nova/compute/manager.py:335 -#, python-format -msgid "instance %s: pausing" -msgstr "" - -#: nova/compute/manager.py:352 -#, python-format -msgid "instance %s: unpausing" -msgstr "" - -#: nova/compute/manager.py:369 -#, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "" - -#: nova/compute/manager.py:382 -#, 
python-format -msgid "instance %s: suspending" -msgstr "" - -#: nova/compute/manager.py:401 -#, python-format -msgid "instance %s: resuming" -msgstr "" - -#: nova/compute/manager.py:420 -#, python-format -msgid "instance %s: locking" -msgstr "" - -#: nova/compute/manager.py:432 -#, python-format -msgid "instance %s: unlocking" -msgstr "" - -#: nova/compute/manager.py:442 -#, python-format -msgid "instance %s: getting locked state" -msgstr "" - -#: nova/compute/manager.py:462 -#, python-format -msgid "instance %s: attaching volume %s to %s" -msgstr "" - -#: nova/compute/manager.py:478 -#, python-format -msgid "instance %s: attach failed %s, removing" -msgstr "" - -#: nova/compute/manager.py:493 -#, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" -msgstr "" - -#: nova/compute/manager.py:497 -#, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "" - -#: nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: nova/compute/monitor.py:355 -#, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" -msgstr "" - -#: nova/compute/monitor.py:377 -#, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" -msgstr "" - -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" -msgstr "" - -#: nova/compute/monitor.py:427 -#, python-format -msgid "Found instance: %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" -msgstr "" - -#: nova/db/sqlalchemy/api.py:132 -#, python-format -msgid "No service for id %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:229 -#, python-format -msgid "No service for %s, %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:574 -#, python-format -msgid "No floating ip for address %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:668 -#, python-format -msgid "No instance for id %s" -msgstr "" - -#: 
nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 -#, python-format -msgid "Instance %s not found" -msgstr "" - -#: nova/db/sqlalchemy/api.py:891 -#, python-format -msgid "no keypair for user %s, name %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 -#, python-format -msgid "No network for id %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1036 -#, python-format -msgid "No network for bridge %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1050 -#, python-format -msgid "No network for instance %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1180 -#, python-format -msgid "Token %s does not exist" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1205 -#, python-format -msgid "No quota for project_id %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1356 -#, python-format -msgid "No volume for id %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1401 -#, python-format -msgid "Volume %s not found" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1413 -#, python-format -msgid "No export device found for volume %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1426 -#, python-format -msgid "No target id found for volume %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1471 -#, python-format -msgid "No security group with id %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1488 -#, python-format -msgid "No security group named %s for project: %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1576 -#, python-format -msgid "No secuity group rule with id %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1650 -#, python-format -msgid "No user for id %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1666 -#, python-format -msgid "No user for access key %s" -msgstr "" - -#: nova/db/sqlalchemy/api.py:1728 -#, python-format -msgid "No project with id %s" -msgstr "" - -#: nova/image/glance.py:78 -#, python-format -msgid "Parallax returned HTTP error %d from request for /images" -msgstr "" - -#: 
nova/image/glance.py:97 -#, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" -msgstr "" - -#: nova/image/s3.py:82 -#, python-format -msgid "Image %s could not be found" -msgstr "" - -#: nova/network/api.py:39 -#, python-format -msgid "Quota exceeeded for %s, tried to allocate address" -msgstr "" - -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" -msgstr "" - -#: nova/network/linux_net.py:176 -#, python-format -msgid "Starting VLAN inteface %s" -msgstr "" - -#: nova/network/linux_net.py:186 -#, python-format -msgid "Starting Bridge interface for %s" -msgstr "" - -#: nova/network/linux_net.py:254 -#, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "" - -#: nova/network/linux_net.py:256 -#, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "" - -#: nova/network/linux_net.py:334 -#, python-format -msgid "Killing dnsmasq threw %s" -msgstr "" - -#: nova/network/manager.py:135 -msgid "setting network host" -msgstr "" - -#: nova/network/manager.py:190 -#, python-format -msgid "Leasing IP %s" -msgstr "" - -#: nova/network/manager.py:194 -#, python-format -msgid "IP %s leased that isn't associated" -msgstr "" - -#: nova/network/manager.py:197 -#, python-format -msgid "IP %s leased to bad mac %s vs %s" -msgstr "" - -#: nova/network/manager.py:205 -#, python-format -msgid "IP %s leased that was already deallocated" -msgstr "" - -#: nova/network/manager.py:214 -#, python-format -msgid "IP %s released that isn't associated" -msgstr "" - -#: nova/network/manager.py:217 -#, python-format -msgid "IP %s released from bad mac %s vs %s" -msgstr "" - -#: nova/network/manager.py:220 -#, python-format -msgid "IP %s released that was not leased" -msgstr "" - -#: nova/network/manager.py:442 -#, python-format -msgid "Dissassociated %s stale fixed ip(s)" -msgstr "" - -#: nova/objectstore/handler.py:106 -#, python-format -msgid "Unknown S3 value type %r" -msgstr "" - -#: 
nova/objectstore/handler.py:137 -msgid "Authenticated request" -msgstr "" - -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" -msgstr "" - -#: nova/objectstore/handler.py:209 -#, python-format -msgid "List keys for bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:217 -#, python-format -msgid "Unauthorized attempt to access bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:235 -#, python-format -msgid "Creating bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:245 -#, python-format -msgid "Deleting bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:249 -#, python-format -msgid "Unauthorized attempt to delete bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:271 -#, python-format -msgid "Getting object: %s / %s" -msgstr "" - -#: nova/objectstore/handler.py:274 -#, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:292 -#, python-format -msgid "Putting object: %s / %s" -msgstr "" - -#: nova/objectstore/handler.py:295 -#, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:314 -#, python-format -msgid "Deleting object: %s / %s" -msgstr "" - -#: nova/objectstore/handler.py:393 -#, python-format -msgid "Not authorized to upload image: invalid directory %s" -msgstr "" - -#: nova/objectstore/handler.py:401 -#, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" -msgstr "" - -#: nova/objectstore/handler.py:406 -#, python-format -msgid "Starting image upload: %s" -msgstr "" - -#: nova/objectstore/handler.py:420 -#, python-format -msgid "Not authorized to update attributes of image %s" -msgstr "" - -#: nova/objectstore/handler.py:428 -#, python-format -msgid "Toggling publicity flag of image %s %r" -msgstr "" - -#: nova/objectstore/handler.py:433 -#, python-format -msgid "Updating user fields on image %s" -msgstr "" - -#: nova/objectstore/handler.py:447 -#, 
python-format -msgid "Unauthorized attempt to delete image %s" -msgstr "" - -#: nova/objectstore/handler.py:452 -#, python-format -msgid "Deleted image: %s" -msgstr "" - -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" -msgstr "" - -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" -msgstr "" - -#: nova/scheduler/manager.py:69 -#, python-format -msgid "Casting to %s %s for %s" -msgstr "" - -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" -msgstr "" - -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" -msgstr "" - -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" -msgstr "" - -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." -msgstr "" - -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." -msgstr "" - -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" -msgstr "" - -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" -msgstr "" - -#: nova/tests/test_rpc.py:89 -#, python-format -msgid "Nested received %s, %s" -msgstr "" - -#: nova/tests/test_rpc.py:94 -#, python-format -msgid "Nested return %s" -msgstr "" - -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 -#, python-format -msgid "Received %s" -msgstr "" - -#: nova/tests/test_volume.py:162 -#, python-format -msgid "Target %s allocated" -msgstr "" - -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" -msgstr "" - -#: nova/virt/fake.py:210 -#, python-format -msgid "Instance %s Not Found" -msgstr "" - -#: nova/virt/hyperv.py:118 -msgid "In init host" -msgstr "" - -#: nova/virt/hyperv.py:131 -#, python-format -msgid "Attempt to create duplicate vm %s" -msgstr "" - -#: nova/virt/hyperv.py:148 -#, python-format -msgid 
"Starting VM %s " -msgstr "" - -#: nova/virt/hyperv.py:150 -#, python-format -msgid "Started VM %s " -msgstr "" - -#: nova/virt/hyperv.py:152 -#, python-format -msgid "spawn vm failed: %s" -msgstr "" - -#: nova/virt/hyperv.py:169 -#, python-format -msgid "Failed to create VM %s" -msgstr "" - -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 -#, python-format -msgid "Set memory for vm %s..." -msgstr "" - -#: nova/virt/hyperv.py:198 -#, python-format -msgid "Set vcpus for vm %s..." -msgstr "" - -#: nova/virt/hyperv.py:202 -#, python-format -msgid "Creating disk for %s by attaching disk file %s" -msgstr "" - -#: nova/virt/hyperv.py:227 -#, python-format -msgid "Failed to add diskdrive to VM %s" -msgstr "" - -#: nova/virt/hyperv.py:230 -#, python-format -msgid "New disk drive path is %s" -msgstr "" - -#: nova/virt/hyperv.py:247 -#, python-format -msgid "Failed to add vhd file to VM %s" -msgstr "" - -#: nova/virt/hyperv.py:249 -#, python-format -msgid "Created disk for %s" -msgstr "" - -#: nova/virt/hyperv.py:253 -#, python-format -msgid "Creating nic for %s " -msgstr "" - -#: nova/virt/hyperv.py:272 -msgid "Failed creating a port on the external vswitch" -msgstr "" - -#: nova/virt/hyperv.py:273 -#, python-format -msgid "Failed creating port for %s" -msgstr "" - -#: nova/virt/hyperv.py:275 -#, python-format -msgid "Created switch port %s on switch %s" -msgstr "" - -#: nova/virt/hyperv.py:285 -#, python-format -msgid "Failed to add nic to VM %s" -msgstr "" - -#: nova/virt/hyperv.py:287 -#, python-format -msgid "Created nic for %s " -msgstr "" - -#: nova/virt/hyperv.py:320 -#, python-format -msgid "WMI job failed: %s" -msgstr "" - -#: nova/virt/hyperv.py:322 -#, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " -msgstr "" - -#: nova/virt/hyperv.py:358 -#, python-format -msgid "Got request to destroy vm %s" -msgstr "" - -#: nova/virt/hyperv.py:383 -#, python-format 
-msgid "Failed to destroy vm %s" -msgstr "" - -#: nova/virt/hyperv.py:389 -#, python-format -msgid "Del: disk %s vm %s" -msgstr "" - -#: nova/virt/hyperv.py:405 -#, python-format -msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" -msgstr "" - -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 -#, python-format -msgid "duplicate name found: %s" -msgstr "" - -#: nova/virt/hyperv.py:444 -#, python-format -msgid "Successfully changed vm state of %s to %s" -msgstr "" - -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 -#, python-format -msgid "Failed to change vm state of %s to %s" -msgstr "" - -#: nova/virt/images.py:70 -#, python-format -msgid "Finished retreving %s -- placed in %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:144 -#, python-format -msgid "Connecting to libvirt: %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" -msgstr "" - -#: nova/virt/libvirt_conn.py:229 -#, python-format -msgid "instance %s: deleting instance files %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:271 -#, python-format -msgid "No disk at %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" -msgstr "" - -#: nova/virt/libvirt_conn.py:294 -#, python-format -msgid "instance %s: rebooted" -msgstr "" - -#: nova/virt/libvirt_conn.py:297 -#, python-format -msgid "_wait_for_reboot failed: %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:340 -#, python-format -msgid "instance %s: rescued" -msgstr "" - -#: nova/virt/libvirt_conn.py:343 -#, python-format -msgid "_wait_for_rescue failed: %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:370 -#, python-format -msgid "instance %s: is running" -msgstr "" - -#: nova/virt/libvirt_conn.py:381 -#, python-format -msgid "instance %s: booted" -msgstr "" - -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 -#, python-format -msgid "instance %s: failed to boot" -msgstr "" - -#: nova/virt/libvirt_conn.py:395 
-#, python-format -msgid "virsh said: %r" -msgstr "" - -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" -msgstr "" - -#: nova/virt/libvirt_conn.py:407 -#, python-format -msgid "data: %r, fpath: %r" -msgstr "" - -#: nova/virt/libvirt_conn.py:415 -#, python-format -msgid "Contents of file %s: %r" -msgstr "" - -#: nova/virt/libvirt_conn.py:449 -#, python-format -msgid "instance %s: Creating image" -msgstr "" - -#: nova/virt/libvirt_conn.py:505 -#, python-format -msgid "instance %s: injecting key into image %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:508 -#, python-format -msgid "instance %s: injecting net into image %s" -msgstr "" - -#: nova/virt/libvirt_conn.py:516 -#, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" -msgstr "" - -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 -#, python-format -msgid "instance %s: starting toXML method" -msgstr "" - -#: nova/virt/libvirt_conn.py:589 -#, python-format -msgid "instance %s: finished toXML method" -msgstr "" - -#: nova/virt/xenapi_conn.py:113 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username " -"(optionally), and xenapi_connection_password to use " -"connection_type=xenapi" -msgstr "" - -#: nova/virt/xenapi_conn.py:263 -#, python-format -msgid "Task [%s] %s status: success %s" -msgstr "" - -#: nova/virt/xenapi_conn.py:271 -#, python-format -msgid "Task [%s] %s status: %s %s" -msgstr "" - -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 -#, python-format -msgid "Got exception: %s" -msgstr "" - -#: nova/virt/xenapi/fake.py:72 -#, python-format -msgid "%s: _db_content => %s" -msgstr "" - -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" -msgstr "" - -#: nova/virt/xenapi/fake.py:249 -#, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "" - -#: nova/virt/xenapi/fake.py:283 -#, 
python-format -msgid "Calling %s %s" -msgstr "" - -#: nova/virt/xenapi/fake.py:288 -#, python-format -msgid "Calling getter %s" -msgstr "" - -#: nova/virt/xenapi/fake.py:340 -#, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" -msgstr "" - -#: nova/virt/xenapi/network_utils.py:40 -#, python-format -msgid "Found non-unique network for bridge %s" -msgstr "" - -#: nova/virt/xenapi/network_utils.py:43 -#, python-format -msgid "Found no network for bridge %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:127 -#, python-format -msgid "Created VM %s as %s." -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:147 -#, python-format -msgid "Creating VBD for VM %s, VDI %s ... " -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:149 -#, python-format -msgid "Created VBD %s for VM %s, VDI %s." -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:165 -#, python-format -msgid "VBD not found in instance %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:175 -#, python-format -msgid "Unable to unplug VBD %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:187 -#, python-format -msgid "Unable to destroy VBD %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:202 -#, python-format -msgid "Creating VIF for VM %s, network %s." -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:205 -#, python-format -msgid "Created VIF %s for VM %s, network %s." -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:216 -#, python-format -msgid "Snapshotting VM %s with label '%s'..." -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:229 -#, python-format -msgid "Created snapshot %s from VM %s." 
-msgstr "" - -#: nova/virt/xenapi/vm_utils.py:243 -#, python-format -msgid "Asking xapi to upload %s as '%s'" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:261 -#, python-format -msgid "Asking xapi to fetch %s as %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:279 -#, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:290 -#, python-format -msgid "PV Kernel in VDI:%d" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:318 -#, python-format -msgid "VDI %s is still available" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:331 -#, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:333 -#, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:390 -#, python-format -msgid "VHD %s has parent %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:407 -#, python-format -msgid "Re-scanning SR %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:431 -#, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:448 -#, python-format -msgid "No VDIs found for VM %s" -msgstr "" - -#: nova/virt/xenapi/vm_utils.py:452 -#, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:62 -#, python-format -msgid "Attempted to create non-unique name %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:99 -#, python-format -msgid "Starting VM %s..." -msgstr "" - -#: nova/virt/xenapi/vmops.py:101 -#, python-format -msgid "Spawning VM %s created %s." 
-msgstr "" - -#: nova/virt/xenapi/vmops.py:112 -#, python-format -msgid "Instance %s: booted" -msgstr "" - -#: nova/virt/xenapi/vmops.py:137 -#, python-format -msgid "Instance not present %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:166 -#, python-format -msgid "Starting snapshot for VM %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:174 -#, python-format -msgid "Unable to Snapshot %s: %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:184 -#, python-format -msgid "Finished snapshot and upload for VM %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:252 -#, python-format -msgid "suspend: instance not present %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:262 -#, python-format -msgid "resume: instance not present %s" -msgstr "" - -#: nova/virt/xenapi/vmops.py:271 -#, python-format -msgid "Instance not found %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:57 -#, python-format -msgid "Introducing %s..." -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:74 -#, python-format -msgid "Introduced %s as %s." -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:90 -#, python-format -msgid "Unable to find SR from VBD %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:96 -#, python-format -msgid "Forgetting SR %s ... " -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:101 -#, python-format -msgid "Ignoring exception %s when getting PBDs for %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:107 -#, python-format -msgid "Ignoring exception %s when unplugging PBD %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:111 -#, python-format -msgid "Forgetting SR %s done." 
-msgstr "" - -#: nova/virt/xenapi/volume_utils.py:113 -#, python-format -msgid "Ignoring exception %s when forgetting SR %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:123 -#, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:128 -#, python-format -msgid "Unable to get record of VDI %s on" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:146 -#, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:175 -#, python-format -msgid "Unable to obtain target information %s, %s" -msgstr "" - -#: nova/virt/xenapi/volume_utils.py:197 -#, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:51 -#, python-format -msgid "Attach_volume: %s, %s, %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:69 -#, python-format -msgid "Unable to create VDI on SR %s for instance %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:81 -#, python-format -msgid "Unable to use SR %s for instance %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:93 -#, python-format -msgid "Unable to attach volume to instance %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:95 -#, python-format -msgid "Mountpoint %s attached to instance %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:106 -#, python-format -msgid "Detach_volume: %s, %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:113 -#, python-format -msgid "Unable to locate volume %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:121 -#, python-format -msgid "Unable to detach volume %s" -msgstr "" - -#: nova/virt/xenapi/volumeops.py:128 -#, python-format -msgid "Mountpoint %s detached from instance %s" -msgstr "" - -#: nova/volume/api.py:44 -#, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" -msgstr "" - -#: nova/volume/api.py:46 -#, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" -msgstr "" - -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" -msgstr "" - -#: nova/volume/api.py:97 -msgid "Volume is already attached" -msgstr "" - -#: nova/volume/api.py:103 -msgid "Volume is already detached" -msgstr "" - -#: nova/volume/driver.py:76 -#, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "" - -#: nova/volume/driver.py:85 -#, python-format -msgid "volume group %s doesn't exist" -msgstr "" - -#: nova/volume/driver.py:210 -#, python-format -msgid "FAKE AOE: %s" -msgstr "" - -#: nova/volume/driver.py:315 -#, python-format -msgid "FAKE ISCSI: %s" -msgstr "" - -#: nova/volume/manager.py:85 -#, python-format -msgid "Re-exporting %s volumes" -msgstr "" - -#: nova/volume/manager.py:93 -#, python-format -msgid "volume %s: creating" -msgstr "" - -#: nova/volume/manager.py:102 -#, python-format -msgid "volume %s: creating lv of size %sG" -msgstr "" - -#: nova/volume/manager.py:106 -#, python-format -msgid "volume %s: creating export" -msgstr "" - -#: nova/volume/manager.py:113 -#, python-format -msgid "volume %s: created successfully" -msgstr "" - -#: nova/volume/manager.py:121 -msgid "Volume is still attached" -msgstr "" - -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" -msgstr "" - -#: nova/volume/manager.py:124 -#, python-format -msgid "volume %s: removing export" -msgstr "" - -#: nova/volume/manager.py:126 -#, python-format -msgid "volume %s: deleting" -msgstr "" - -#: nova/volume/manager.py:129 -#, python-format -msgid "volume %s: deleted successfully" -msgstr "" - diff --git a/po/nova.pot b/po/nova.pot new file mode 100644 index 000000000..576621ce9 --- /dev/null +++ b/po/nova.pot @@ -0,0 +1,2705 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-02-09 09:26-0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=CHARSET\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/simple.py:75 +#: ../nova/scheduler/simple.py:110 ../nova/scheduler/simple.py:122 +#: ../nova/scheduler/zone.py:55 +msgid "No hosts found" +msgstr "" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/virt/fake.py:224 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/api/openstack/servers.py:138 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:219 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:248 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:259 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:270 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:159 +msgid "Wrong number of arguments." +msgstr "" + +#: ../nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:223 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:232 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:264 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:709 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:322 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" 
+msgstr "" + +#: ../nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: ../nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: ../nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:79 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" + +#: ../nova/compute/manager.py:83 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:85 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:90 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:94 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:177 +msgid "Instance has already been created" +msgstr "" + +#: ../nova/compute/manager.py:178 +#, python-format +msgid "instance %s: starting..." +msgstr "" + +#. 
pylint: disable-msg=W0702 +#: ../nova/compute/manager.py:217 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: ../nova/compute/manager.py:231 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: ../nova/compute/manager.py:253 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: ../nova/compute/manager.py:266 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: ../nova/compute/manager.py:280 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: ../nova/compute/manager.py:285 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:309 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: ../nova/compute/manager.py:314 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" + +#: ../nova/compute/manager.py:355 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: ../nova/compute/manager.py:370 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: ../nova/compute/manager.py:389 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: ../nova/compute/manager.py:406 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: ../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: ../nova/compute/manager.py:436 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: ../nova/compute/manager.py:455 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: ../nova/compute/manager.py:474 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: ../nova/compute/manager.py:486 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: ../nova/compute/manager.py:496 +#, python-format +msgid 
"instance %s: getting locked state" +msgstr "" + +#: ../nova/compute/manager.py:506 ../nova/api/ec2/cloud.py:513 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: ../nova/compute/manager.py:514 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "" + +#: ../nova/compute/manager.py:524 +#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#. pylint: disable-msg=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. +#: ../nova/compute/manager.py:540 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: ../nova/compute/manager.py:556 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" + +#: ../nova/compute/manager.py:559 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating 
export" +msgstr "" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:300 ../nova/virt/xenapi/fake.py:400 +#: ../nova/virt/xenapi/fake.py:418 ../nova/virt/xenapi/fake.py:474 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:302 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:337 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:342 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:402 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: ../nova/network/linux_net.py:181 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: ../nova/network/linux_net.py:202 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#. 
pylint: disable-msg=W0703 +#: ../nova/network/linux_net.py:308 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:310 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#. pylint: disable-msg=W0703 +#: ../nova/network/linux_net.py:352 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:354 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#. pylint: disable-msg=W0703 +#: ../nova/network/linux_net.py:443 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: ../nova/utils.py:56 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: ../nova/utils.py:57 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: ../nova/utils.py:116 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: ../nova/utils.py:128 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: ../nova/utils.py:141 +#, python-format +msgid "Result was %s" +msgstr "" + +#: ../nova/utils.py:179 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: ../nova/utils.py:184 +#, python-format +msgid "Running %s" +msgstr "" + +#: ../nova/utils.py:215 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: ../nova/utils.py:218 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: ../nova/utils.py:316 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: ../nova/utils.py:327 +#, python-format +msgid "backend %s" +msgstr "" + +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: 
../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:158 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:161 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:177 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:199 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:214 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:217 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:236 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:248 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:276 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:317 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:322 +#, python-format +msgid "Glance image %s" +msgstr "" + +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:351 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:376 ../nova/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:387 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:395 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:401 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:403 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:415 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:432 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:445 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:447 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:507 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:524 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:549 +#, python-format +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:556 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:572 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:576 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:635 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:637 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:639 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... 
" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:641 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:643 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:646 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:650 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:665 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:670 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:674 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:677 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:686 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:717 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:729 +#, python-format +msgid "Writing partition table %s done." 
+msgstr "" + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:597 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:933 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1048 ../nova/db/sqlalchemy/api.py:1106 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1078 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1092 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1227 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1252 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1405 ../nova/db/sqlalchemy/api.py:1451 +#: ../nova/api/ec2/__init__.py:328 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1464 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1477 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1522 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1539 +#, python-format +msgid "No security 
group named %(group_name)s for project: %(project_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1632 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1706 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1722 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1784 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1929 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1946 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1985 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2007 +#, python-format +msgid "on instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2008 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." 
+msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:124 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: 
../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" + +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." 
+msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:384 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:388 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:419 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:428 ../nova/api/ec2/cloud.py:457 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:441 +msgid "No rule for the specified parameters." +msgstr "" + +#: ../nova/api/ec2/cloud.py:448 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:462 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:490 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:493 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: ../nova/api/ec2/cloud.py:505 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:582 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: ../nova/api/ec2/cloud.py:610 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:627 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:759 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:764 +#, python-format +msgid "Release address 
%s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:769 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:778 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:805 +msgid "Going to start terminating instances" +msgstr "" + +#: ../nova/api/ec2/cloud.py:813 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: ../nova/api/ec2/cloud.py:850 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:858 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:865 ../nova/api/ec2/cloud.py:880 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:870 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:883 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:885 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:887 +msgid "operation_type must be add or remove" +msgstr "" + +#: ../nova/api/ec2/cloud.py:888 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" +msgstr "" + +#: ../bin/nova-api.py:57 +#, python-format +msgid "No paste configuration for app: %s" +msgstr "" + +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" + +#: ../bin/nova-api.py:64 +#, python-format +msgid "Running %s API" +msgstr "" + +#: ../bin/nova-api.py:69 +#, python-format +msgid "No known API applications configured in %s." 
+msgstr "" + +#: ../bin/nova-api.py:83 +#, python-format +msgid "Starting nova-api node (version %s)" +msgstr "" + +#: ../bin/nova-api.py:89 +#, python-format +msgid "No paste configuration found for: %s" +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 +#, python-format +msgid "Argument %(key)s value %(value)s is too short." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 +#, python-format +msgid "Argument %(key)s value %(value)s contains invalid characters." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 +#, python-format +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 +#, python-format +msgid "Argument %s is required." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." +msgstr "" + +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:67 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:105 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:108 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:120 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:164 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vmops.py:193 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:201 +#, python-format +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:212 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:444 +#, python-format +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; args=" +"%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:447 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; args=" +"%(strargs)s" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:638 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/image/s3.py:89 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: ../nova/api/ec2/__init__.py:126 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:136 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: ../nova/api/ec2/__init__.py:174 ../nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:187 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: ../nova/api/ec2/__init__.py:212 +#, python-format +msgid "action: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:214 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:286 +#, python-format +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:319 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:325 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:331 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:334 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:348 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:206 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:217 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:203 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:301 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:307 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:321 ../nova/virt/xenapi_conn.py:334 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: ../nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: ../nova/api/ec2/apirequest.py:99 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:54 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:75 +msgid "Including admin operations in API." +msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../nova/virt/disk.py:69 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: ../nova/virt/disk.py:91 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: ../nova/virt/disk.py:124 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" + +#: ../doc/ext/nova_todo.py:46 +#, python-format +msgid "%(filename)s, line %(line_info)d" +msgstr "" + +#. FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: ../nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: ../nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: ../nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: ../nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" + +#: ../nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: ../nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: ../nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: ../nova/virt/hyperv.py:276 +#, python-format +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "" + +#: ../nova/virt/hyperv.py:286 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: ../nova/virt/hyperv.py:288 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: ../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "" + +#: ../nova/virt/hyperv.py:361 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:386 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: ../nova/virt/hyperv.py:393 +#, python-format +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" + +#: ../nova/virt/hyperv.py:415 +#, python-format +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, num_cpu=" +"%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" + +#: ../nova/virt/hyperv.py:451 +#, python-format +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: 
../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/compute/api.py:71 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: ../nova/compute/api.py:77 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: ../nova/compute/api.py:96 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" + +#: ../nova/compute/api.py:98 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" + +#: ../nova/compute/api.py:113 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:162 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: ../nova/compute/api.py:189 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" + +#: ../nova/compute/api.py:293 +#, python-format +msgid "Going to try to terminate %s" +msgstr "" + +#: ../nova/compute/api.py:297 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: ../nova/compute/api.py:302 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: ../nova/compute/api.py:471 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:486 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:95 +#, python-format +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" + +#: ../nova/rpc.py:100 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." 
+msgstr "" + +#: ../nova/rpc.py:119 +msgid "Reconnected to queue" +msgstr "" + +#: ../nova/rpc.py:126 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:156 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: ../nova/rpc.py:171 +#, python-format +msgid "received %s" +msgstr "" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:184 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: ../nova/rpc.py:185 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: ../nova/rpc.py:246 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: ../nova/rpc.py:287 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:306 +msgid "Making asynchronous call..." +msgstr "" + +#: ../nova/rpc.py:309 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: ../nova/rpc.py:347 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:357 +#, python-format +msgid "response %s" +msgstr "" + +#: ../nova/rpc.py:366 +#, python-format +msgid "topic is %s" +msgstr "" + +#: ../nova/rpc.py:367 +#, python-format +msgid "message %s" +msgstr "" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. 
No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/network/manager.py:139 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:194 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:198 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:202 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:210 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:215 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:219 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:223 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:226 +#, python-format 
+msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:461 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: ../nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:273 +#, 
python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:276 +#, python-format +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:296 +#, python-format +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:299 +#, python-format +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:318 +#, python-format +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:396 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: ../nova/objectstore/handler.py:404 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: ../nova/objectstore/handler.py:409 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: ../nova/objectstore/handler.py:423 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:431 +#, python-format +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" + +#. 
other attributes imply update +#: ../nova/objectstore/handler.py:436 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:450 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" + +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:423 +#, python-format +msgid 
"Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:448 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:515 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: ../nova/auth/manager.py:592 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: ../nova/auth/manager.py:659 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:671 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" 
+msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:149 +#, python-format +msgid "LDAP user %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:180 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:313 +#, python-format +msgid "User %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:435 +#, python-format +msgid "Group can't be created because group %s already exists" +msgstr "" + +#: ../nova/auth/ldapdriver.py:441 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:458 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:470 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:473 ../nova/auth/ldapdriver.py:484 +#, python-format +msgid "The group at dn %s doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:476 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: ../nova/auth/ldapdriver.py:487 +#, python-format +msgid "User %s can't be removed from the group because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:491 +#, python-format +msgid "User %s is not a member of the group" +msgstr "" + +#: ../nova/auth/ldapdriver.py:505 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: ../nova/auth/ldapdriver.py:512 +#, python-format +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" + +#: ../nova/auth/ldapdriver.py:527 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project 
%(project)s" +msgstr "" diff --git a/setup.py b/setup.py index e3c45ce3e..c9393c508 100644 --- a/setup.py +++ b/setup.py @@ -19,9 +19,17 @@ import os import subprocess -from setuptools import setup, find_packages +from setuptools import find_packages from setuptools.command.sdist import sdist +try: + import DistUtilsExtra.auto +except ImportError: + print >> sys.stderr, 'To build nova you need https://launchpad.net/python-distutils-extra' + sys.exit(1) +assert DistUtilsExtra.auto.__version__ >= '2.18', 'needs DistUtilsExtra.auto >= 2.18' + + from nova.utils import parse_mailmap, str_dict_replace from nova import version @@ -75,7 +83,7 @@ try: except: pass -setup(name='nova', +DistUtilsExtra.auto.setup(name='nova', version=version.canonical_version_string(), description='cloud computing fabric controller', author='OpenStack', -- cgit From 9c0862b5f84cdb09b7ab0aafca669d30f261a666 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 14 Feb 2011 10:21:16 -0600 Subject: support for multiple IPs per network --- nova/virt/xenapi/vmops.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 575e53f80..db05a24ff 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -96,22 +96,36 @@ class VMOps(object): # write network info admin_context = context.get_admin_context() - #network = db.network_get_by_instance(admin_context, - # instance['id']) + # TODO(tr3buchet) - remove comment in multi-nic + # I've decided to go ahead and consider multiple IPs and networks + # at this stage even though they aren't implemented because these will + # be needed for multi-nic and there was no sense writing it for single + # network/single IP and then having to turn around and re-write it + IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) for network in db.network_get_all_by_instance(admin_context, instance['id']): + network_IPs = [ip for ip in IPs if 
ip.network_id == network.id] + + def ip_dict(ip): + return {'netmask': network['netmask'], + 'enabled': '1', + 'ip': ip.address} + mac_id = instance.mac_address.replace(':', '') location = 'vm-data/networking/%s' % mac_id mapping = {'label': network['label'], 'gateway': network['gateway'], 'mac': instance.mac_address, 'dns': [network['dns']], - 'ips': [{'netmask': network['netmask'], - 'enabled': '1', - 'ip': '192.168.3.3'}]} # <===== CHANGE!!!! + 'ips': [ip_dict(ip) for ip in network_IPs]} self.write_to_param_xenstore(vm_ref, {location: mapping}) + # TODO(tr3buchet) - remove comment in multi-nic + # this bit here about creating the vifs will be updated + # in multi-nic to handle multiple IPs on the same network + # and multiple networks + # for now it works as there is only one of each bridge = network['bridge'] network_ref = \ NetworkHelper.find_network_with_bridge(self._session, bridge) -- cgit From ee26d0827b7ad3e4d7869614835fe58abe32dfc8 Mon Sep 17 00:00:00 2001 From: Ricardo Carrillo Cruz Date: Mon, 14 Feb 2011 17:43:39 +0100 Subject: Got rid of BadParameter, just using standard python ValueError --- nova/network/manager.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index 8eb9f041b..d911844a1 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -505,6 +505,12 @@ class VlanManager(NetworkManager): def create_networks(self, context, cidr, num_networks, network_size, cidr_v6, vlan_start, vpn_start): """Create networks based on parameters.""" + # Check that num_networks + vlan_start is not > 4094, fixes lp708025 + if num_networks + vlan_start > 4094: + raise ValueError(_('The sum between the number of networks and' + ' the vlan start cannot be greater' + ' than 4094')) + fixed_net = IPy.IP(cidr) fixed_net_v6 = IPy.IP(cidr_v6) network_size_v6 = 1 << 64 -- cgit From 3f96e6dbf12533355aa6722eeb498814df076aea Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 14 Feb 2011 12:32:33 -0600 
Subject: added call to reset_network from openstack api down to vmops --- nova/api/openstack/servers.py | 14 ++++++++++++++ nova/compute/api.py | 9 ++++++++- nova/compute/manager.py | 12 ++++++++++++ nova/virt/xenapi_conn.py | 4 ++++ 4 files changed, 38 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8cbcebed2..c604bd215 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -242,6 +242,20 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def reset_network(self, req, id): + """ + admin only operation which resets networking on an instance + + """ + context = req.environ['nova.context'] + try: + self.compute_api.reset_network(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::reset_network %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] diff --git a/nova/compute/api.py b/nova/compute/api.py index 6a3fe08b6..43332ed27 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1,4 +1,4 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# vim: tabstop=5 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -463,6 +463,13 @@ class API(base.Base): instance = self.get(context, instance_id) return instance['locked'] + def reset_network(self, context, instance_id): + """ + resets networking on the instance + + """ + self._cast_compute_message('reset_network', context, instance_id) + def attach_volume(self, context, instance_id, volume_id, device): if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. 
" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6f09ce674..b03f58693 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -494,6 +494,18 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) return instance_ref['locked'] + @checks_instance_lock + def reset_network(self, context, instance_id): + """ + resets the networking on the instance + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + LOG.debug(_('instance %s: reset network'), instance_id, + context=context) + self.driver.reset_network(instance_ref) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 927f5905b..4e5442aa6 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -188,6 +188,10 @@ class XenAPIConnection(object): """resume the specified instance""" self._vmops.resume(instance, callback) + def reset_network(self, instance): + """reset networking for specified instance""" + self._vmops.reset_network(instance) + def get_info(self, instance_id): """Return data about VM instance""" return self._vmops.get_info(instance_id) -- cgit From ee4cba7779daa5b2e7415fb69cabc698b7dd60da Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 14 Feb 2011 12:59:46 -0600 Subject: corrected model for table lookup --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f20f4e266..827f81ae2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -609,7 +609,7 @@ def fixed_ip_get_instance(context, address): @require_context def fixed_ip_get_all_by_instance(context, instance_id): session = get_session() - rv = session.query(models.Network.fixed_ips).\ + rv = session.query(models.FixedIp).\ 
filter_by(instance_id=instance_id).\ filter_by(deleted=False) if not rv: -- cgit From 6147a606cbe6b7e764865d2471d86f503437051b Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 14 Feb 2011 14:52:58 -0800 Subject: fixed template and added migration --- nova/auth/novarc.template | 6 +-- .../sqlalchemy/migrate_repo/versions/003_cactus.py | 62 ++++++++++++++++++++++ 2 files changed, 65 insertions(+), 3 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 702df3bb0..cda2ecc28 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -10,6 +10,6 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" -export NOVA_TOOLS_API_KEY="%(access)s" -export NOVA_TOOLS_USERNAME="%(user)s" -export NOVA_TOOLS_URL="%(os)s" +export NOVA_API_KEY="%(access)s" +export NOVA_USERNAME="%(user)s" +export NOVA_URL="%(os)s" diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py new file mode 100644 index 000000000..eb3287077 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py @@ -0,0 +1,62 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# +# New Tables +# +child_zones = Table('child_zones', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('api_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# Tables to alter +# + +# (none currently) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (child_zones, ): + try: + table.create() + except Exception: + logging.info(repr(table)) -- cgit From 1da5dcc0644a13cfb99852f3438649f710feb2bc Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 14 Feb 2011 14:54:04 -0800 Subject: removed debugging --- nova/api/openstack/auth.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index a383ef086..473071738 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -79,7 +79,6 @@ class AuthMiddleware(wsgi.Middleware): except KeyError: return faults.Fault(webob.exc.HTTPUnauthorized()) - logging.debug("**** USERNAME %s, PASSWORD %s" % (username, key)) token, user = self._authorize_user(username, key, req) if user and token: res = webob.Response() -- cgit From e65291cf34894322bd0f3f6661907e48e7a6a0b5 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 14 Feb 2011 20:11:29 -0400 Subject: fixed nova-combined debug hack and renamed ChildZone to Zone --- bin/nova-combined | 4 ++-- nova/db/api.py | 10 +++++----- nova/db/sqlalchemy/api.py | 10 +++++----- nova/db/sqlalchemy/migration.py | 2 +- nova/db/sqlalchemy/models.py | 6 +++--- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/bin/nova-combined b/bin/nova-combined index a0f552d64..913c866bf 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -53,11 +53,11 @@ if __name__ == '__main__': compute = service.Service.create(binary='nova-compute') network = service.Service.create(binary='nova-network') - #volume = service.Service.create(binary='nova-volume') + volume = service.Service.create(binary='nova-volume') scheduler = service.Service.create(binary='nova-scheduler') #objectstore = service.Service.create(binary='nova-objectstore') - service.serve(compute, network, scheduler) + service.serve(compute, network, volume, scheduler) apps = [] paste_config_file = 
wsgi.paste_config_file('nova-api.conf') diff --git a/nova/db/api.py b/nova/db/api.py index fa73d86ad..939f1a069 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -986,25 +986,25 @@ def console_get(context, console_id, instance_id=None): def zone_create(context, values): - """Create a new ChildZone entry in this Zone.""" + """Create a new child Zone entry.""" return IMPL.zone_create(context, values) def zone_update(context, zone_id, values): - """Update a ChildZone entry in this Zone.""" + """Update a child Zone entry.""" return IMPL.zone_update(context, values) def zone_delete(context, zone_id): - """Delete a ChildZone.""" + """Delete a child Zone.""" return IMPL.zone_delete(context, zone_id) def zone_get(context, zone_id): - """Get a specific ChildZone.""" + """Get a specific child Zone.""" return IMPL.zone_get(context, zone_id) def zone_get_all(context): - """Get all ChildZone's.""" + """Get all child Zones.""" return IMPL.zone_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b3320c819..abd65b67b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2021,7 +2021,7 @@ def console_get(context, console_id, instance_id=None): @require_admin_context def zone_create(context, values): - zone = models.ChildZone() + zone = models.Zone() zone.update(values) zone.save() return zone @@ -2029,7 +2029,7 @@ def zone_create(context, values): @require_admin_context def zone_update(context, zone_id, values): - zone = session.query(models.ChildZone).filter_by(id=zone_id).first() + zone = session.query(models.Zone).filter_by(id=zone_id).first() if not zone: raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) zone.update(values) @@ -2041,14 +2041,14 @@ def zone_update(context, zone_id, values): def zone_delete(context, zone_id): session = get_session() with session.begin(): - session.execute('delete from child_zones ' + session.execute('delete from zones ' 'where id=:id', {'id': zone_id}) 
@require_admin_context def zone_get(context, zone_id): session = get_session() - result = session.query(models.ChildZone).filter_by(id=zone_id).first() + result = session.query(models.Zone).filter_by(id=zone_id).first() if not result: raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) return result @@ -2057,4 +2057,4 @@ def zone_get(context, zone_id): @require_admin_context def zone_get_all(context): session = get_session() - return session.query(models.ChildZone).all() + return session.query(models.Zone).all() diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index 194ecc627..1d9c041f5 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -45,7 +45,7 @@ def db_version(): engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False) meta.reflect(bind=engine) try: - for table in ('auth_tokens', 'child_zones', 'export_devices', + for table in ('auth_tokens', 'zones', 'export_devices', 'fixed_ips', 'floating_ips', 'instances', 'key_pairs', 'networks', 'projects', 'quotas', 'security_group_instance_association', diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 071d57fca..4930c7a2a 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -535,9 +535,9 @@ class Console(BASE, NovaBase): pool = relationship(ConsolePool, backref=backref('consoles')) -class ChildZone(BASE, NovaBase): +class Zone(BASE, NovaBase): """Represents a child zone of this zone.""" - __tablename__ = 'child_zones' + __tablename__ = 'zones' id = Column(Integer, primary_key=True) api_url = Column(String(255)) username = Column(String(255)) @@ -556,7 +556,7 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console, ChildZone) + Project, Certificate, ConsolePool, Console, Zone) engine = 
create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) -- cgit From c15289a63c90218a573d5e75833985ec2ad8691e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 14 Feb 2011 23:02:26 -0400 Subject: better filtering --- nova/api/openstack/zones.py | 13 ++++++++----- nova/tests/api/openstack/test_zones.py | 10 +++------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 2c93c0c7b..830464ffd 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -33,6 +33,10 @@ def _filter_keys(item, keys): return dict((k, v) for k, v in item.iteritems() if k in keys) +def _scrub_zone(zone): + return _filter_keys(zone, ('id', 'api_url')) + + class Controller(wsgi.Controller): _serialization_metadata = { @@ -44,7 +48,7 @@ class Controller(wsgi.Controller): """Return all zones in brief""" items = db.zone_get_all(req.environ['nova.context']) items = common.limited(items, req) - items = [_filter_keys(item, ('id', 'api_url')) for item in items] + items = [_scrub_zone(item) for item in items] return dict(zones=items) def detail(self, req): @@ -55,8 +59,7 @@ class Controller(wsgi.Controller): """Return data about the given zone id""" zone_id = int(id) zone = db.zone_get(req.environ['nova.context'], zone_id) - zone = _filter_keys(zone, ('id', 'api_url')) - return dict(zone=zone) + return dict(zone=_scrub_zone(zone)) def delete(self, req, id): zone_id = int(id) @@ -67,11 +70,11 @@ class Controller(wsgi.Controller): context = req.environ['nova.context'] env = self._deserialize(req.body, req) zone = db.zone_create(context, env["zone"]) - return dict(zone=zone) + return dict(zone=_scrub_zone(zone)) def update(self, req, id): context = req.environ['nova.context'] env = self._deserialize(req.body, req) zone_id = int(id) zone = db.zone_update(context, zone_id, env["zone"]) - return dict(zone=zone) + return dict(zone=_scrub_zone(zone)) diff --git 
a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 8dbdffa41..5542a1cf3 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -98,9 +98,7 @@ class ZonesTest(unittest.TestCase): self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') - self.assertEqual(res_dict['zone']['username'], 'bob') - self.assertEqual(res_dict['zone']['password'], 'xxx') - + self.assertFalse('password' in res_dict['zone']) self.assertEqual(res.status_int, 200) def test_zone_delete(self): @@ -122,8 +120,7 @@ class ZonesTest(unittest.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://blah.zoo') - self.assertEqual(res_dict['zone']['username'], 'fred') - self.assertEqual(res_dict['zone']['password'], 'fubar') + self.assertFalse('username' in res_dict['zone']) def test_zone_update(self): body = dict(zone=dict(username='zeb', password='sneaky')) @@ -137,8 +134,7 @@ class ZonesTest(unittest.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') - self.assertEqual(res_dict['zone']['username'], 'zeb') - self.assertEqual(res_dict['zone']['password'], 'sneaky') + self.assertFalse('username' in res_dict['zone']) if __name__ == '__main__': -- cgit From e90e0355b6edcd381ea4cf1977fdcf1481fdf703 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Tue, 15 Feb 2011 05:12:01 +0000 Subject: Launchpad automatic translations update. 
--- locale/zh_CN.po | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locale/zh_CN.po b/locale/zh_CN.po index 01b8dc378..a39383497 100644 --- a/locale/zh_CN.po +++ b/locale/zh_CN.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-14 05:22+0000\n" +"X-Launchpad-Export-Date: 2011-02-15 05:12+0000\n" "X-Generator: Launchpad (build 12351)\n" #: nova/twistd.py:268 -- cgit From bb2ab1211cbe1528e510fa45b189ba4bd04ac2f9 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 15 Feb 2011 11:06:28 -0500 Subject: Update .pot file with source file and line numbers after running python setup.py build --- po/nova.pot | 264 ++++++++++++++++++++++++++++++++---------------------------- 1 file changed, 141 insertions(+), 123 deletions(-) diff --git a/po/nova.pot b/po/nova.pot index 576621ce9..f747ae0f7 100644 --- a/po/nova.pot +++ b/po/nova.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2011-02-09 09:26-0800\n" +"POT-Creation-Date: 2011-02-15 11:05-0500\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,9 +17,9 @@ msgstr "" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" -#: ../nova/scheduler/chance.py:37 ../nova/scheduler/simple.py:75 -#: ../nova/scheduler/simple.py:110 ../nova/scheduler/simple.py:122 -#: ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 msgid "No hosts found" msgstr "" @@ -68,11 +68,6 @@ msgstr "" msgid "Volume is already detached" msgstr "" -#: ../nova/virt/fake.py:224 -#, python-format -msgid "Instance %s Not Found" -msgstr "" - #: ../nova/api/openstack/servers.py:138 #, python-format msgid "%(param)s 
property not found for image %(_image_id)s" @@ -142,7 +137,7 @@ msgstr "" #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:709 ../nova/virt/libvirt_conn.py:741 -#: ../nova/api/ec2/__init__.py:322 +#: ../nova/api/ec2/__init__.py:321 #, python-format msgid "Instance %s not found" msgstr "" @@ -383,7 +378,7 @@ msgstr "" msgid "instance %s: getting locked state" msgstr "" -#: ../nova/compute/manager.py:506 ../nova/api/ec2/cloud.py:513 +#: ../nova/compute/manager.py:506 ../nova/api/ec2/cloud.py:515 #, python-format msgid "Get console output for instance %s" msgstr "" @@ -540,40 +535,40 @@ msgstr "" msgid "Failed to open connection to the hypervisor" msgstr "" -#: ../nova/network/linux_net.py:181 +#: ../nova/network/linux_net.py:187 #, python-format msgid "Starting VLAN inteface %s" msgstr "" -#: ../nova/network/linux_net.py:202 +#: ../nova/network/linux_net.py:208 #, python-format msgid "Starting Bridge interface for %s" msgstr "" #. pylint: disable-msg=W0703 -#: ../nova/network/linux_net.py:308 +#: ../nova/network/linux_net.py:314 #, python-format msgid "Hupping dnsmasq threw %s" msgstr "" -#: ../nova/network/linux_net.py:310 +#: ../nova/network/linux_net.py:316 #, python-format msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" #. pylint: disable-msg=W0703 -#: ../nova/network/linux_net.py:352 +#: ../nova/network/linux_net.py:358 #, python-format msgid "killing radvd threw %s" msgstr "" -#: ../nova/network/linux_net.py:354 +#: ../nova/network/linux_net.py:360 #, python-format msgid "Pid %d is stale, relaunching radvd" msgstr "" #. 
pylint: disable-msg=W0703 -#: ../nova/network/linux_net.py:443 +#: ../nova/network/linux_net.py:449 #, python-format msgid "Killing dnsmasq threw %s" msgstr "" @@ -598,37 +593,42 @@ msgstr "" msgid "Running cmd (subprocess): %s" msgstr "" -#: ../nova/utils.py:141 +#: ../nova/utils.py:141 ../nova/utils.py:181 #, python-format msgid "Result was %s" msgstr "" -#: ../nova/utils.py:179 +#: ../nova/utils.py:157 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: ../nova/utils.py:215 #, python-format msgid "debug in callback: %s" msgstr "" -#: ../nova/utils.py:184 +#: ../nova/utils.py:220 #, python-format msgid "Running %s" msgstr "" -#: ../nova/utils.py:215 +#: ../nova/utils.py:251 #, python-format msgid "Link Local address is not found.:%s" msgstr "" -#: ../nova/utils.py:218 +#: ../nova/utils.py:254 #, python-format msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: ../nova/utils.py:316 +#: ../nova/utils.py:352 #, python-format msgid "Invalid backend: %s" msgstr "" -#: ../nova/utils.py:327 +#: ../nova/utils.py:363 #, python-format msgid "backend %s" msgstr "" @@ -793,129 +793,129 @@ msgstr "" msgid "VDI %s is still available" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:445 +#: ../nova/virt/xenapi/vm_utils.py:453 #, python-format msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:447 +#: ../nova/virt/xenapi/vm_utils.py:455 #, python-format msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:507 +#: ../nova/virt/xenapi/vm_utils.py:515 #, python-format msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:524 +#: ../nova/virt/xenapi/vm_utils.py:532 #, python-format msgid "Re-scanning SR %s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:549 +#: ../nova/virt/xenapi/vm_utils.py:557 #, python-format msgid "" "VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." 
msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:556 +#: ../nova/virt/xenapi/vm_utils.py:564 #, python-format msgid "" "Parent %(parent_uuid)s doesn't match original parent " "%(original_parent_uuid)s, waiting for coalesce..." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:572 +#: ../nova/virt/xenapi/vm_utils.py:580 #, python-format msgid "No VDIs found for VM %s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:576 +#: ../nova/virt/xenapi/vm_utils.py:584 #, python-format msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:635 +#: ../nova/virt/xenapi/vm_utils.py:643 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format msgid "Creating VBD for VDI %s ... " msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:637 +#: ../nova/virt/xenapi/vm_utils.py:645 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format msgid "Creating VBD for VDI %s done." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:639 +#: ../nova/virt/xenapi/vm_utils.py:647 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format msgid "Plugging VBD %s ... " msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:641 +#: ../nova/virt/xenapi/vm_utils.py:649 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format msgid "Plugging VBD %s done." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:643 +#: ../nova/virt/xenapi/vm_utils.py:651 #, python-format msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:646 +#: ../nova/virt/xenapi/vm_utils.py:654 #, python-format msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:650 +#: ../nova/virt/xenapi/vm_utils.py:658 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format msgid "Destroying VBD for VDI %s ... 
" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../nova/virt/xenapi/vm_utils.py:661 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format msgid "Destroying VBD for VDI %s done." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:665 +#: ../nova/virt/xenapi/vm_utils.py:673 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 msgid "VBD.unplug successful first time." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:670 +#: ../nova/virt/xenapi/vm_utils.py:678 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 msgid "VBD.unplug rejected: retrying..." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:674 +#: ../nova/virt/xenapi/vm_utils.py:682 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 msgid "VBD.unplug successful eventually." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:677 +#: ../nova/virt/xenapi/vm_utils.py:685 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:686 +#: ../nova/virt/xenapi/vm_utils.py:694 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:717 +#: ../nova/virt/xenapi/vm_utils.py:725 #, python-format msgid "" "Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: ../nova/virt/xenapi/vm_utils.py:729 +#: ../nova/virt/xenapi/vm_utils.py:737 #, python-format msgid "Writing partition table %s done." 
msgstr "" @@ -954,105 +954,105 @@ msgstr "" msgid "No floating ip for address %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:933 +#: ../nova/db/sqlalchemy/api.py:939 #, python-format msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1048 ../nova/db/sqlalchemy/api.py:1106 +#: ../nova/db/sqlalchemy/api.py:1054 ../nova/db/sqlalchemy/api.py:1112 #, python-format msgid "No network for id %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1078 +#: ../nova/db/sqlalchemy/api.py:1084 #, python-format msgid "No network for bridge %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1092 +#: ../nova/db/sqlalchemy/api.py:1098 #, python-format msgid "No network for instance %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1227 +#: ../nova/db/sqlalchemy/api.py:1233 #, python-format msgid "Token %s does not exist" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1252 +#: ../nova/db/sqlalchemy/api.py:1258 #, python-format msgid "No quota for project_id %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1405 ../nova/db/sqlalchemy/api.py:1451 -#: ../nova/api/ec2/__init__.py:328 +#: ../nova/db/sqlalchemy/api.py:1411 ../nova/db/sqlalchemy/api.py:1457 +#: ../nova/api/ec2/__init__.py:327 #, python-format msgid "Volume %s not found" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1464 +#: ../nova/db/sqlalchemy/api.py:1470 #, python-format msgid "No export device found for volume %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1477 +#: ../nova/db/sqlalchemy/api.py:1483 #, python-format msgid "No target id found for volume %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1522 +#: ../nova/db/sqlalchemy/api.py:1528 #, python-format msgid "No security group with id %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1539 +#: ../nova/db/sqlalchemy/api.py:1545 #, python-format msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1632 +#: ../nova/db/sqlalchemy/api.py:1638 #, python-format msgid "No secuity group rule 
with id %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1706 +#: ../nova/db/sqlalchemy/api.py:1712 #, python-format msgid "No user for id %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1722 +#: ../nova/db/sqlalchemy/api.py:1728 #, python-format msgid "No user for access key %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1784 +#: ../nova/db/sqlalchemy/api.py:1790 #, python-format msgid "No project with id %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1929 +#: ../nova/db/sqlalchemy/api.py:1935 #, python-format msgid "No console pool with id %(pool_id)s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1946 +#: ../nova/db/sqlalchemy/api.py:1952 #, python-format msgid "" "No console pool of type %(console_type)s for compute host %(compute_host)s " "on proxy host %(host)s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:1985 +#: ../nova/db/sqlalchemy/api.py:1991 #, python-format msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:2007 +#: ../nova/db/sqlalchemy/api.py:2013 #, python-format msgid "on instance %s" msgstr "" -#: ../nova/db/sqlalchemy/api.py:2008 +#: ../nova/db/sqlalchemy/api.py:2014 #, python-format msgid "No console with id %(console_id)s %(idesc)s" msgstr "" @@ -1262,129 +1262,129 @@ msgstr "" msgid "Delete key pair %s" msgstr "" -#: ../nova/api/ec2/cloud.py:384 +#: ../nova/api/ec2/cloud.py:386 #, python-format msgid "%s is not a valid ipProtocol" msgstr "" -#: ../nova/api/ec2/cloud.py:388 +#: ../nova/api/ec2/cloud.py:390 msgid "Invalid port range" msgstr "" -#: ../nova/api/ec2/cloud.py:419 +#: ../nova/api/ec2/cloud.py:421 #, python-format msgid "Revoke security group ingress %s" msgstr "" -#: ../nova/api/ec2/cloud.py:428 ../nova/api/ec2/cloud.py:457 +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 msgid "Not enough parameters to build a valid rule." msgstr "" -#: ../nova/api/ec2/cloud.py:441 +#: ../nova/api/ec2/cloud.py:443 msgid "No rule for the specified parameters." 
msgstr "" -#: ../nova/api/ec2/cloud.py:448 +#: ../nova/api/ec2/cloud.py:450 #, python-format msgid "Authorize security group ingress %s" msgstr "" -#: ../nova/api/ec2/cloud.py:462 +#: ../nova/api/ec2/cloud.py:464 #, python-format msgid "This rule already exists in group %s" msgstr "" -#: ../nova/api/ec2/cloud.py:490 +#: ../nova/api/ec2/cloud.py:492 #, python-format msgid "Create Security Group %s" msgstr "" -#: ../nova/api/ec2/cloud.py:493 +#: ../nova/api/ec2/cloud.py:495 #, python-format msgid "group %s already exists" msgstr "" -#: ../nova/api/ec2/cloud.py:505 +#: ../nova/api/ec2/cloud.py:507 #, python-format msgid "Delete security group %s" msgstr "" -#: ../nova/api/ec2/cloud.py:582 +#: ../nova/api/ec2/cloud.py:584 #, python-format msgid "Create volume of %s GB" msgstr "" -#: ../nova/api/ec2/cloud.py:610 +#: ../nova/api/ec2/cloud.py:612 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: ../nova/api/ec2/cloud.py:627 +#: ../nova/api/ec2/cloud.py:629 #, python-format msgid "Detach volume %s" msgstr "" -#: ../nova/api/ec2/cloud.py:759 +#: ../nova/api/ec2/cloud.py:761 msgid "Allocate address" msgstr "" -#: ../nova/api/ec2/cloud.py:764 +#: ../nova/api/ec2/cloud.py:766 #, python-format msgid "Release address %s" msgstr "" -#: ../nova/api/ec2/cloud.py:769 +#: ../nova/api/ec2/cloud.py:771 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: ../nova/api/ec2/cloud.py:778 +#: ../nova/api/ec2/cloud.py:780 #, python-format msgid "Disassociate address %s" msgstr "" -#: ../nova/api/ec2/cloud.py:805 +#: ../nova/api/ec2/cloud.py:807 msgid "Going to start terminating instances" msgstr "" -#: ../nova/api/ec2/cloud.py:813 +#: ../nova/api/ec2/cloud.py:815 #, python-format msgid "Reboot instance %r" msgstr "" -#: ../nova/api/ec2/cloud.py:850 +#: ../nova/api/ec2/cloud.py:867 #, python-format msgid "De-registering image %s" msgstr "" -#: ../nova/api/ec2/cloud.py:858 +#: 
../nova/api/ec2/cloud.py:875 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: ../nova/api/ec2/cloud.py:865 ../nova/api/ec2/cloud.py:880 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format msgid "attribute not supported: %s" msgstr "" -#: ../nova/api/ec2/cloud.py:870 +#: ../nova/api/ec2/cloud.py:890 #, python-format msgid "invalid id: %s" msgstr "" -#: ../nova/api/ec2/cloud.py:883 +#: ../nova/api/ec2/cloud.py:903 msgid "user or group not specified" msgstr "" -#: ../nova/api/ec2/cloud.py:885 +#: ../nova/api/ec2/cloud.py:905 msgid "only group \"all\" is supported" msgstr "" -#: ../nova/api/ec2/cloud.py:887 +#: ../nova/api/ec2/cloud.py:907 msgid "operation_type must be add or remove" msgstr "" -#: ../nova/api/ec2/cloud.py:888 +#: ../nova/api/ec2/cloud.py:908 #, python-format msgid "Updating image %s publicity" msgstr "" @@ -1507,21 +1507,29 @@ msgstr "" msgid "VM %(vm)s already halted, skipping shutdown..." msgstr "" -#: ../nova/virt/xenapi/vmops.py:444 +#: ../nova/virt/xenapi/vmops.py:295 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:305 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:459 #, python-format msgid "" "TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; args=" "%(strargs)s" msgstr "" -#: ../nova/virt/xenapi/vmops.py:447 +#: ../nova/virt/xenapi/vmops.py:462 #, python-format msgid "" "The call to %(method)s returned an error: %(e)s. 
VM id=%(instance_id)s; args=" "%(strargs)s" msgstr "" -#: ../nova/virt/xenapi/vmops.py:638 +#: ../nova/virt/xenapi/vmops.py:653 #, python-format msgid "OpenSSL error: %s" msgstr "" @@ -1553,74 +1561,74 @@ msgstr "" msgid "Launching VPN for %s" msgstr "" -#: ../nova/image/s3.py:89 +#: ../nova/image/s3.py:99 #, python-format msgid "Image %s could not be found" msgstr "" -#: ../nova/api/ec2/__init__.py:126 +#: ../nova/api/ec2/__init__.py:125 msgid "Too many failed authentications." msgstr "" -#: ../nova/api/ec2/__init__.py:136 +#: ../nova/api/ec2/__init__.py:135 #, python-format msgid "" "Access key %(access_key)s has had %(failures)d failed authentications and " "will be locked out for %(lock_mins)d minutes." msgstr "" -#: ../nova/api/ec2/__init__.py:174 ../nova/objectstore/handler.py:140 +#: ../nova/api/ec2/__init__.py:173 ../nova/objectstore/handler.py:140 #, python-format msgid "Authentication Failure: %s" msgstr "" -#: ../nova/api/ec2/__init__.py:187 +#: ../nova/api/ec2/__init__.py:186 #, python-format msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: ../nova/api/ec2/__init__.py:212 +#: ../nova/api/ec2/__init__.py:211 #, python-format msgid "action: %s" msgstr "" -#: ../nova/api/ec2/__init__.py:214 +#: ../nova/api/ec2/__init__.py:213 #, python-format msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: ../nova/api/ec2/__init__.py:286 +#: ../nova/api/ec2/__init__.py:285 #, python-format msgid "" "Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: ../nova/api/ec2/__init__.py:319 +#: ../nova/api/ec2/__init__.py:318 #, python-format msgid "InstanceNotFound raised: %s" msgstr "" -#: ../nova/api/ec2/__init__.py:325 +#: ../nova/api/ec2/__init__.py:324 #, python-format msgid "VolumeNotFound raised: %s" msgstr "" -#: ../nova/api/ec2/__init__.py:331 +#: ../nova/api/ec2/__init__.py:330 #, python-format msgid "NotFound raised: %s" msgstr "" -#: ../nova/api/ec2/__init__.py:334 +#: ../nova/api/ec2/__init__.py:333 #, 
python-format msgid "ApiError raised: %s" msgstr "" -#: ../nova/api/ec2/__init__.py:343 +#: ../nova/api/ec2/__init__.py:342 #, python-format msgid "Unexpected error raised: %s" msgstr "" -#: ../nova/api/ec2/__init__.py:348 +#: ../nova/api/ec2/__init__.py:347 msgid "An unknown error has occurred. Please try your request again." msgstr "" @@ -1708,6 +1716,11 @@ msgstr "" msgid "Found instance: %s" msgstr "" +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + #: ../nova/api/ec2/apirequest.py:99 #, python-format msgid "" @@ -2155,51 +2168,56 @@ msgid "" "on interface %(interface)s" msgstr "" -#: ../nova/network/manager.py:139 +#: ../nova/virt/fake.py:224 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/network/manager.py:143 msgid "setting network host" msgstr "" -#: ../nova/network/manager.py:194 +#: ../nova/network/manager.py:198 #, python-format msgid "Leasing IP %s" msgstr "" -#: ../nova/network/manager.py:198 +#: ../nova/network/manager.py:202 #, python-format msgid "IP %s leased that isn't associated" msgstr "" -#: ../nova/network/manager.py:202 +#: ../nova/network/manager.py:206 #, python-format msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: ../nova/network/manager.py:210 +#: ../nova/network/manager.py:214 #, python-format msgid "IP %s leased that was already deallocated" msgstr "" -#: ../nova/network/manager.py:215 +#: ../nova/network/manager.py:219 #, python-format msgid "Releasing IP %s" msgstr "" -#: ../nova/network/manager.py:219 +#: ../nova/network/manager.py:223 #, python-format msgid "IP %s released that isn't associated" msgstr "" -#: ../nova/network/manager.py:223 +#: ../nova/network/manager.py:227 #, python-format msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: ../nova/network/manager.py:226 +#: ../nova/network/manager.py:230 #, python-format msgid "IP %s released that was not leased" msgstr "" -#: 
../nova/network/manager.py:461 +#: ../nova/network/manager.py:464 #, python-format msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -- cgit From 5a988eb393c306097250a7f17ea65f0919fd9219 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 15 Feb 2011 12:43:29 -0400 Subject: fixed / renamed migration scripts --- .../migrate_repo/versions/003_add_zone_tables.py | 62 ++++++++++++++++++++++ .../sqlalchemy/migrate_repo/versions/003_cactus.py | 62 ---------------------- 2 files changed, 62 insertions(+), 62 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_add_zone_tables.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_zone_tables.py new file mode 100644 index 000000000..d2b6b9570 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_zone_tables.py @@ -0,0 +1,62 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# +# New Tables +# +zones = Table('zones', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('api_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# Tables to alter +# + +# (none currently) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (zones, ): + try: + table.create() + except Exception: + logging.info(repr(table)) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py deleted file mode 100644 index eb3287077..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -# -# New Tables -# -child_zones = Table('child_zones', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('api_url', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('username', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('password', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - - -# -# Tables to alter -# - -# (none currently) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - for table in (child_zones, ): - try: - table.create() - except Exception: - logging.info(repr(table)) -- cgit From f02c41a7fe332b215421320d041a944e4b9ee9ee Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Tue, 15 Feb 2011 12:39:17 -0500 Subject: Replace placeholders in nova.pot with some actual values. 
--- po/nova.pot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/po/nova.pot b/po/nova.pot index f747ae0f7..6cbb2f129 100644 --- a/po/nova.pot +++ b/po/nova.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2011-02-15 11:05-0500\n" +"POT-Creation-Date: 2011-02-15 12:38-0500\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" -- cgit From cbe4ee9f3a6bf207d475ca230032ced9325c3b7a Mon Sep 17 00:00:00 2001 From: brian-lamar Date: Tue, 15 Feb 2011 15:33:17 -0500 Subject: Added teammate Naveed to authors file for his help. --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index e8bb58456..03f30084d 100644 --- a/Authors +++ b/Authors @@ -43,6 +43,7 @@ Monty Taylor MORITA Kazutaka Muneyuki Noguchi Nachi Ueno +Naveed Massjouni Paul Voccio Ricardo Carrillo Cruz Rick Clark -- cgit From 745b7b22f7b22a09e6c3bbc1cd8591eb3aa7f554 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Tue, 15 Feb 2011 21:38:47 +0100 Subject: removed flag --pidfile from nova/services.py --- nova/service.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/service.py b/nova/service.py index 59648adf2..8b1b91e90 100644 --- a/nova/service.py +++ b/nova/service.py @@ -50,10 +50,6 @@ flags.DEFINE_integer('periodic_interval', 60, 'seconds between running periodic tasks', lower_bound=1) -flags.DEFINE_string('pidfile', None, - 'pidfile to use for this service') - - flags.DEFINE_flag(flags.HelpFlag()) flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpXMLFlag()) -- cgit From 503749849df73df1732583bc9452e7952bf78ac2 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 15 Feb 2011 15:25:48 -0600 Subject: moved reset network to after boot durrrrr... 
--- nova/virt/xenapi/vmops.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 00028cdaa..dd9f48ddf 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -134,9 +134,6 @@ class VMOps(object): VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) - # call reset networking - self.reset_network(instance) - LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) instance_name = instance.name @@ -164,6 +161,10 @@ class VMOps(object): timer.stop() timer.f = _wait_for_boot + + # call reset networking + self.reset_network(instance) + return timer.start(interval=0.5, now=True) def _get_vm_opaque_ref(self, instance_or_vm): -- cgit From 1b9413e11ba1b4b49b50965e3f812e636f2319d5 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 15 Feb 2011 18:20:44 -0600 Subject: stubbed out reset networkin xenapi VM tests to solve domid problem --- nova/tests/test_xenapi.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index d5660c5d1..6b8efc9d8 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -32,6 +32,7 @@ from nova.virt import xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi.vmops import SimpleDH +from nova.virt.xenapi.vmops import VMOps from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs from nova.tests.glance import stubs as glance_stubs @@ -141,6 +142,10 @@ class XenAPIVolumeTestCase(test.TestCase): self.stubs.UnsetAll() +def reset_network(*args): + pass + + class XenAPIVMTestCase(test.TestCase): """ Unit tests for VM operations @@ -162,6 +167,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_get_this_vm_uuid(self.stubs) 
stubs.stubout_stream_disk(self.stubs) + self.stubs.Set(VMOps, 'reset_network', reset_network) glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) -- cgit From c6b8f129ae57da2ea0cd844150e58d4fac7eb71d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 16 Feb 2011 14:12:54 -0600 Subject: added test for reset_network to openstack api tests, tabstop 5 to 4, renamed migration --- nova/api/openstack/__init__.py | 1 + nova/compute/api.py | 2 +- .../versions/003_add_label_to_networks.py | 52 ++++++++++++++++++++++ .../sqlalchemy/migrate_repo/versions/003_cactus.py | 52 ---------------------- nova/tests/api/openstack/test_servers.py | 12 +++++ 5 files changed, 66 insertions(+), 53 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 056c7dd27..dc3738d4a 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -79,6 +79,7 @@ class APIRouter(wsgi.Router): server_members["actions"] = "GET" server_members['suspend'] = 'POST' server_members['resume'] = 'POST' + server_members['reset_network'] = 'POST' mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, diff --git a/nova/compute/api.py b/nova/compute/api.py index 857028605..71879b5b7 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1,4 +1,4 @@ -# vim: tabstop=5 shiftwidth=4 softtabstop=4 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py new file mode 100644 index 000000000..ddfe114cb --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +networks = Table('networks', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# + + +# +# Tables to alter +# + +networks_label = Column( + 'label', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + networks.create_column(networks_label) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py b/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py deleted file mode 100644 index ddfe114cb..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_cactus.py +++ /dev/null @@ -1,52 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -networks = Table('networks', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -# -# New Tables -# - - -# -# Tables to alter -# - -networks_label = Column( - 'label', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - networks.create_column(networks_label) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 724f14f19..89e192eed 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -281,6 +281,18 @@ class ServersTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) + def test_server_reset_network(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/reset_network') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + def test_server_diagnostics(self): req = webob.Request.blank("/v1.0/servers/1/diagnostics") req.method = "GET" -- cgit From 5f0340504784c1a0847e5b19aa9a317d9be16c20 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 16 Feb 2011 16:19:57 -0500 Subject: Added alternate email to mailmap --- .mailmap | 1 + Authors | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index c6f6c9a8b..a05520884 100644 --- a/.mailmap +++ b/.mailmap @@ -34,3 +34,4 @@ + diff --git a/Authors b/Authors index c57ca8aed..eb9d540bf 100644 --- a/Authors +++ b/Authors @@ -4,7 +4,6 @@ Anthony Young Antony Messerli Armando Migliaccio Bilal Akhtar -Brian Lamar Chiradeep Vittal Chmouel Boudjnah Chris Behrens -- cgit From 05b96f9ddd0cc54c74c55c170b2037eeeafb527a Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 16 Feb 2011 16:22:16 -0500 Subject: Accidently removed myself from Authors. 
--- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index eb9d540bf..c57ca8aed 100644 --- a/Authors +++ b/Authors @@ -4,6 +4,7 @@ Anthony Young Antony Messerli Armando Migliaccio Bilal Akhtar +Brian Lamar Chiradeep Vittal Chmouel Boudjnah Chris Behrens -- cgit From 6b5823f0aa75707fad6ca38dde490a47b740c3da Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 16 Feb 2011 16:40:40 -0500 Subject: Flipped mailmap entries --- .mailmap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index a05520884..b2287d65f 100644 --- a/.mailmap +++ b/.mailmap @@ -34,4 +34,4 @@ - + -- cgit From 5faa6e59ff9dff02e8d583e6711bd08dd1f821fd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 16 Feb 2011 14:15:41 -0800 Subject: add periodic disassociate from VlanManager to FlatDHCPManager. --- nova/network/manager.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index 8eb9f041b..a4a4c6064 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -394,6 +394,18 @@ class FlatDHCPManager(FlatManager): like FlatDHCPManager. """ + def periodic_tasks(self, context=None): + """Tasks to be run at a periodic interval.""" + super(FlatDHCPManager, self).periodic_tasks(context) + now = datetime.datetime.utcnow() + timeout = FLAGS.fixed_ip_disassociate_timeout + time = now - datetime.timedelta(seconds=timeout) + num = self.db.fixed_ip_disassociate_all_by_timeout(context, + self.host, + time) + if num: + LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) + def init_host(self): """Do any initialization that needs to be run if this is a standalone service. -- cgit From f50101fcf845e93637f50e426ceb759641a20b76 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 17 Feb 2011 13:46:24 +0100 Subject: Make eth0 the default for FLAGS.public_interface. 
--- nova/network/linux_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index c1cbff7d8..535ce87bc 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -44,7 +44,7 @@ flags.DEFINE_string('dhcp_domain', flags.DEFINE_string('networks_path', '$state_path/networks', 'Location to keep network config files') -flags.DEFINE_string('public_interface', 'vlan1', +flags.DEFINE_string('public_interface', 'eth0', 'Interface for public IP addresses') flags.DEFINE_string('vlan_interface', 'eth0', 'network device for vlans') -- cgit From e28ce7f82d1c89ab0c4e5ebfa98c12f502a33138 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 17 Feb 2011 09:48:16 -0500 Subject: removing superfluous pass statements; replacing list comprehension with for loop; alphabetizing imports --- nova/api/openstack/servers.py | 6 ++---- nova/tests/api/openstack/test_servers.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 60f3d96e3..312d83ba5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -72,15 +72,13 @@ def _translate_detail_keys(inst): inst_dict['addresses']['private'].append(private_ip) except KeyError: LOG.debug(_("Failed to read private ip")) - pass # grab all public floating ips try: - [inst_dict['addresses']['public'].append(floating['address']) \ - for floating in inst['fixed_ip']['floating_ips']] + for floating in inst['fixed_ip']['floating_ips']: + inst_dict['addresses']['public'].append(floating['address']) except KeyError: LOG.debug(_("Failed to read public ip(s)")) - pass inst_dict['metadata'] = {} inst_dict['hostId'] = '' diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 816a0ab8c..ace3b6850 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -15,9 +15,9 @@ # License 
for the specific language governing permissions and limitations # under the License. +import datetime import json import unittest -import datetime import stubout import webob -- cgit From 7bb9e4c598f829a16cc6444346e087ddb506182a Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Thu, 17 Feb 2011 16:58:00 +0100 Subject: added new functionality to list all defined fixed ips --- bin/nova-manage | 31 +++++++++++++++++++++++++++++++ nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 11 +++++++++++ 3 files changed, 47 insertions(+) diff --git a/bin/nova-manage b/bin/nova-manage index e4c0684c4..a8c9441e2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -433,6 +433,37 @@ class ProjectCommands(object): "nova-api server on this host.") +class FixedIpCommands(object): + """Class for managing fixed ip.""" + + def list(self, host=None): + """Lists all fixed ips (optionally by host) arguments: [host]""" + ctxt = context.get_admin_context() + if host == None: + fixed_ips = db.fixed_ip_get_all(ctxt) + else: + fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host) + + print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'), + _('IP address'), + _('MAC address'), + _('hostname'), + _('host')) + for fixed_ip in fixed_ips: + hostname = None + host = None + mac_address = None + if fixed_ip['instance']: + instance = fixed_ip['instance'] + hostname = instance['hostname'] + host = instance['host'] + mac_address = instance['mac_address'] + print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % ( + fixed_ip['network']['cidr'], + fixed_ip['address'], + mac_address, hostname, host) + + class FloatingIpCommands(object): """Class for managing floating ip.""" diff --git a/nova/db/api.py b/nova/db/api.py index 789cb8ebb..2b621044a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -288,6 +288,11 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time): return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) +def fixed_ip_get_all(context): + """Get all defined fixed ips.""" + 
return IMPL.fixed_ip_get_all(context) + + def fixed_ip_get_by_address(context, address): """Get a fixed ip by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address(context, address) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 02855e7a9..55f2f28bd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -583,6 +583,17 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): return result.rowcount +@require_admin_context +def fixed_ip_get_all(context, session=None): + if not session: + session = get_session() + result = session.query(models.FixedIp).all() + if not result: + raise exception.NotFound(_('No fixed ips defined')) + + return result + + @require_context def fixed_ip_get_by_address(context, address, session=None): if not session: -- cgit From 0b4641a90e5f51cddccb9886902a90d64ceb3200 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Thu, 17 Feb 2011 17:10:51 +0100 Subject: added entry in the category list --- bin/nova-manage | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/nova-manage b/bin/nova-manage index a8c9441e2..ddc78f7e5 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -653,6 +653,7 @@ CATEGORIES = [ ('role', RoleCommands), ('shell', ShellCommands), ('vpn', VpnCommands), + ('fixed', FixedIpCommands), ('floating', FloatingIpCommands), ('network', NetworkCommands), ('service', ServiceCommands), -- cgit From 28b77765fd38038fd7093589170dead48ffc417f Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 17 Feb 2011 12:13:20 -0500 Subject: Switched mailmap entries --- .mailmap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index b2287d65f..a05520884 100644 --- a/.mailmap +++ b/.mailmap @@ -34,4 +34,4 @@ - + -- cgit From ea4d21b546d9447bac50cf97a62c11129da12d21 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 17 Feb 2011 13:10:37 -0600 Subject: comments + Englilish, changed copyright in migration, removed network_get_all 
from db.api (vestigial) --- nova/api/openstack/servers.py | 2 +- nova/compute/api.py | 2 +- nova/compute/manager.py | 2 +- nova/db/api.py | 7 +------ .../sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py | 3 +-- nova/virt/xenapi/vmops.py | 6 ++++++ plugins/xenserver/xenapi/etc/xapi.d/plugins/agent | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8b72704ba..33cc3bbde 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -251,7 +251,7 @@ class Controller(wsgi.Controller): def reset_network(self, req, id): """ - admin only operation which resets networking on an instance + Reset networking on an instance (admin only). """ context = req.environ['nova.context'] diff --git a/nova/compute/api.py b/nova/compute/api.py index 71879b5b7..ed6f0e34a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -468,7 +468,7 @@ class API(base.Base): def reset_network(self, context, instance_id): """ - resets networking on the instance + Reset networking on the instance. """ self._cast_compute_message('reset_network', context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1e2b95294..6fab1a41c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -501,7 +501,7 @@ class ComputeManager(manager.Manager): @checks_instance_lock def reset_network(self, context, instance_id): """ - resets the networking on the instance + Reset networking on the instance. 
""" context = context.elevated() diff --git a/nova/db/api.py b/nova/db/api.py index 850a5126f..ce3395932 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -505,11 +505,6 @@ def network_get(context, network_id): return IMPL.network_get(context, network_id) -def network_get_all(context): - """Get all networks""" - return IMPL.network_get_all(context) - - # pylint: disable-msg=C0103 def network_get_associated_fixed_ips(context, network_id): """Get all network's ips that have been associated.""" @@ -527,7 +522,7 @@ def network_get_by_instance(context, instance_id): def network_get_all_by_instance(context, instance_id): - """Get all networks by instance id or raise if it does not exist.""" + """Get all networks by instance id or raise if none exist.""" return IMPL.network_get_all_by_instance(context, instance_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py index ddfe114cb..5ba7910f1 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -1,7 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ea99ff626..842e08f22 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -151,6 +151,8 @@ class VMOps(object): % locals()) # NOTE(armando): Do we really need to do this in virt? 
+ # NOTE(tr3buchet): not sure but wherever we do it, we need to call + # reset_network afterwards timer = utils.LoopingCall(f=None) def _wait_for_boot(): @@ -437,6 +439,10 @@ class VMOps(object): return 'http://fakeajaxconsole/fake_url' def reset_network(self, instance): + """ + Creates uuid arg to pass to make_agent_call and calls it. + + """ args = {'id': str(uuid.uuid4())} resp = self._make_agent_call('resetnetwork', instance, '', args) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent index 07c7e4df9..f99ea4082 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -94,7 +94,7 @@ def password(self, arg_dict): @jsonify def resetnetwork(self, arg_dict): """ - writes a resquest to xenstore that tells the agent to reset the networking + Writes a resquest to xenstore that tells the agent to reset networking. """ arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''}) -- cgit From c2f585952a67aa0c922d7ec80b387e8617587541 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 17 Feb 2011 21:27:48 +0100 Subject: Re-alphabetise Authors, move extra addressses into .mailmap. 
--- .mailmap | 46 ++++++++++++++++++++++++++-------------------- Authors | 10 +++++----- 2 files changed, 31 insertions(+), 25 deletions(-) diff --git a/.mailmap b/.mailmap index a05520884..a839eba6c 100644 --- a/.mailmap +++ b/.mailmap @@ -1,37 +1,43 @@ # Format is: -# - - +# +# + + + + - - + + + + + + - - - - + - + Masumoto + + + - + + + + - + + + + - - - - - - - - + diff --git a/Authors b/Authors index 395c6b9ed..494e614a0 100644 --- a/Authors +++ b/Authors @@ -3,17 +3,17 @@ Anne Gentle Anthony Young Antony Messerli Armando Migliaccio -Brian Waldon Bilal Akhtar Brian Lamar -Brian Schott +Brian Schott +Brian Waldon Chiradeep Vittal Chmouel Boudjnah Chris Behrens Christian Berendt Cory Wright -David Pravec Dan Prince +David Pravec Dean Troyer Devin Carlen Ed Leafe @@ -44,7 +44,7 @@ Monsyne Dragon Monty Taylor MORITA Kazutaka Muneyuki Noguchi -Nachi Ueno +Nachi Ueno Naveed Massjouni Paul Voccio Ricardo Carrillo Cruz @@ -59,7 +59,7 @@ Soren Hansen Thierry Carrez Todd Willey Trey Morris -Tushar Patil +Tushar Patil Vasiliy Shlykov Vishvananda Ishaya Youcef Laribi -- cgit From e3f461b3b1087fa6342942daa764ba6ffb9ae383 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 17 Feb 2011 22:47:02 +0100 Subject: Add **kwargs to VlanManager's create_networks so that optional args from other managers don't break. 
--- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index b906a83ed..6647692ca 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -509,7 +509,7 @@ class VlanManager(NetworkManager): network_ref['bridge']) def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, vlan_start, vpn_start): + cidr_v6, vlan_start, vpn_start, **kwargs): """Create networks based on parameters.""" # Check that num_networks + vlan_start is not > 4094, fixes lp708025 if num_networks + vlan_start > 4094: -- cgit From 60ed73265a52f264021bb7452cde9f83181b3dfc Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 17 Feb 2011 17:51:24 -0400 Subject: copyright notice --- nova/api/openstack/servers.py | 2 -- nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py | 3 +-- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index c7f863764..009ef6db1 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2010 OpenStack LLC. # All Rights Reserved. # diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py index d2b6b9570..ade981687 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py @@ -1,5 +1,4 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may -- cgit From f6f0135bb320de3cde093f48cb3189380c173b12 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 17 Feb 2011 14:14:07 -0800 Subject: Correctly pass the associate paramater to project_get_network --- nova/db/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/api.py b/nova/db/api.py index 52c2bb84d..d7f3746d2 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -576,7 +576,7 @@ def project_get_network(context, project_id, associate=True): """ - return IMPL.project_get_network(context, project_id) + return IMPL.project_get_network(context, project_id, associate) def project_get_network_v6(context, project_id): -- cgit From fe576e28a6ed8e15d4cdb96313d9f58426715bb0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 17 Feb 2011 14:39:36 -0800 Subject: move periodic tasks to base class based on class variable as per review --- nova/network/manager.py | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index a4a4c6064..78b7f0ae1 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -110,6 +110,7 @@ class NetworkManager(manager.Manager): This class must be subclassed to support specific topologies. 
""" + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): if not network_driver: @@ -138,6 +139,19 @@ class NetworkManager(manager.Manager): self.driver.ensure_floating_forward(floating_ip['address'], fixed_address) + def periodic_tasks(self, context=None): + """Tasks to be run at a periodic interval.""" + super(NetworkManager, self).periodic_tasks(context) + if self.timeout_fixed_ips: + now = utils.utcnow() + timeout = FLAGS.fixed_ip_disassociate_timeout + time = now - datetime.timedelta(seconds=timeout) + num = self.db.fixed_ip_disassociate_all_by_timeout(context, + self.host, + time) + if num: + LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) + def set_network_host(self, context, network_id): """Safely sets the host of the network.""" LOG.debug(_("setting network host"), context=context) @@ -306,6 +320,7 @@ class FlatManager(NetworkManager): not do any setup in this mode, it must be done manually. Requests to 169.254.169.254 port 80 will need to be forwarded to the api server. """ + timeout_fixed_ips = False def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool.""" @@ -397,14 +412,7 @@ class FlatDHCPManager(FlatManager): def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" super(FlatDHCPManager, self).periodic_tasks(context) - now = datetime.datetime.utcnow() - timeout = FLAGS.fixed_ip_disassociate_timeout - time = now - datetime.timedelta(seconds=timeout) - num = self.db.fixed_ip_disassociate_all_by_timeout(context, - self.host, - time) - if num: - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) + self._disassociate_old_ips(context) def init_host(self): """Do any initialization that needs to be run if this is a @@ -463,18 +471,6 @@ class VlanManager(NetworkManager): instances in its subnet. 
""" - def periodic_tasks(self, context=None): - """Tasks to be run at a periodic interval.""" - super(VlanManager, self).periodic_tasks(context) - now = datetime.datetime.utcnow() - timeout = FLAGS.fixed_ip_disassociate_timeout - time = now - datetime.timedelta(seconds=timeout) - num = self.db.fixed_ip_disassociate_all_by_timeout(context, - self.host, - time) - if num: - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) - def init_host(self): """Do any initialization that needs to be run if this is a standalone service. -- cgit From eef994eb690a9454e187a2b0cdbde85aba4c55cd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 17 Feb 2011 14:50:29 -0800 Subject: remove leftover periodic tasks --- nova/network/manager.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 78b7f0ae1..9f37d966a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -409,11 +409,6 @@ class FlatDHCPManager(FlatManager): like FlatDHCPManager. """ - def periodic_tasks(self, context=None): - """Tasks to be run at a periodic interval.""" - super(FlatDHCPManager, self).periodic_tasks(context) - self._disassociate_old_ips(context) - def init_host(self): """Do any initialization that needs to be run if this is a standalone service. -- cgit From 9a7213b615bcaa2127f76146d594f5247ea0d0a4 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 17 Feb 2011 15:00:18 -0800 Subject: Initial support for per-instance metadata, though the OpenStack API. Key/value pairs can be specified at instance creation time and are returned in the details view. Support limits based on quota system. 
--- nova/api/ec2/cloud.py | 6 +- nova/api/openstack/servers.py | 30 +++++++-- nova/compute/api.py | 29 +++++++- nova/db/sqlalchemy/api.py | 2 + .../versions/004_add_instance_metadata.py | 78 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 18 ++++- nova/quota.py | 14 +++- nova/tests/api/openstack/test_servers.py | 11 ++- nova/tests/test_quota.py | 24 +++++++ run_tests.sh | 4 +- 10 files changed, 202 insertions(+), 14 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6919cd8d2..33eba5028 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -783,6 +783,9 @@ class CloudController(object): def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) + # NOTE(justinsb): the EC2 API doesn't support metadata here, but this + # is needed for the unit tests. Maybe the unit tests shouldn't be + # calling the EC2 code instances = self.compute_api.create(context, instance_type=instance_types.get_by_type( kwargs.get('instance_type', None)), @@ -797,7 +800,8 @@ class CloudController(object): user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( - 'AvailabilityZone')) + 'AvailabilityZone'), + metadata=kwargs.get('metadata', [])) return self._format_run_instances(context, instances[0]['reservation_id']) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 009ef6db1..49611703a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -78,9 +78,14 @@ def _translate_detail_keys(inst): except KeyError: LOG.debug(_("Failed to read public ip(s)")) - inst_dict['metadata'] = {} inst_dict['hostId'] = '' + # Return the metadata as a dictionary + metadata = {} + for item in inst['metadata']: + metadata[item['key']] = item['value'] + inst_dict['metadata'] = metadata + return dict(server=inst_dict) @@ 
-162,14 +167,26 @@ class Controller(wsgi.Controller): if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) - key_pair = auth_manager.AuthManager.get_key_pairs( - req.environ['nova.context'])[0] + context = req.environ['nova.context'] + + key_pair = auth_manager.AuthManager.get_key_pairs(context)[0] image_id = common.get_image_id_from_image_hash(self._image_service, - req.environ['nova.context'], env['server']['imageId']) + context, env['server']['imageId']) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) + + # Metadata is a list, not a Dictionary, because we allow duplicate keys + # (even though JSON can't encode this) + # In future, we may not allow duplicate keys. + # However, the CloudServers API is not definitive on this front, + # and we want to be compatible. + metadata = [] + if env['server']['metadata']: + for k, v in env['server']['metadata'].items(): + metadata.append({'key': k, 'value': v}) + instances = self.compute_api.create( - req.environ['nova.context'], + context, instance_types.get_by_flavor_id(env['server']['flavorId']), image_id, kernel_id=kernel_id, @@ -177,7 +194,8 @@ class Controller(wsgi.Controller): display_name=env['server']['name'], display_description=env['server']['name'], key_name=key_pair['name'], - key_data=key_pair['public_key']) + key_data=key_pair['public_key'], + metadata=metadata) return _translate_keys(instances[0]) def update(self, req, id): diff --git a/nova/compute/api.py b/nova/compute/api.py index ed6f0e34a..cad167f4d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -85,7 +85,7 @@ class API(base.Base): min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None): + availability_zone=None, user_data=None, metadata=[]): """Create the number of instances requested if quota and other arguments check out ok.""" @@ -99,6 +99,30 @@ class API(base.Base): "run %s more instances 
of this type.") % num_instances, "InstanceLimitExceeded") + num_metadata = len(metadata) + quota_metadata = quota.allowed_metadata_items(context, num_metadata) + if quota_metadata < num_metadata: + pid = context.project_id + msg = (_("Quota exceeeded for %(pid)s," + " tried to set %(num_metadata)s metadata properties") + % locals()) + LOG.warn(msg) + raise quota.QuotaError(msg, "MetadataLimitExceeded") + + # Because metadata is stored in the DB, we hard-code the size limits + # In future, we may support more variable length strings, so we act + # as if this is quota-controlled for forwards compatibility + for metadata_item in metadata: + k = metadata_item['key'] + v = metadata_item['value'] + if len(k) > 255 or len(v) > 255: + pid = context.project_id + msg = (_("Quota exceeeded for %(pid)s," + " metadata property key or value too long") + % locals()) + LOG.warn(msg) + raise quota.QuotaError(msg, "MetadataLimitExceeded") + is_vpn = image_id == FLAGS.vpn_image_id if not is_vpn: image = self.image_service.show(context, image_id) @@ -155,7 +179,8 @@ class API(base.Base): 'key_name': key_name, 'key_data': key_data, 'locked': False, - 'availability_zone': availability_zone} + 'availability_zone': availability_zone, + 'metadata': metadata} elevated = context.elevated() instances = [] diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2697fac73..a6b8066b9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -715,6 +715,7 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -723,6 +724,7 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ + 
options(joinedload('metadata')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py new file mode 100644 index 000000000..4cb07e0d8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +quotas = Table('quotas', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# + +instance_metadata_table = Table('instance_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +# +# New columns +# +quota_metadata_items = Column('metadata_items', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (instance_metadata_table, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + quotas.create_column(quota_metadata_items) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 40a96fc17..a842e4cc4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -256,6 +256,7 @@ class Quota(BASE, NovaBase): volumes = Column(Integer) gigabytes = Column(Integer) floating_ips = Column(Integer) + metadata_items = Column(Integer) class ExportDevice(BASE, NovaBase): @@ -536,6 +537,20 @@ class Console(BASE, NovaBase): pool = relationship(ConsolePool, backref=backref('consoles')) +class InstanceMetadata(BASE, NovaBase): + """Represents a metadata key/value pair for an instance""" + __tablename__ = 'instance_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, backref="metadata", + foreign_keys=instance_id, + primaryjoin='and_(' + 'InstanceMetadata.instance_id == Instance.id,' + 'InstanceMetadata.deleted == False)') + + class Zone(BASE, NovaBase): """Represents a child zone of this zone.""" __tablename__ = 'zones' @@ -557,7 +572,8 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate, ConsolePool, Console, Zone) + Project, Certificate, ConsolePool, Console, Zone, + InstanceMetadata) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/quota.py b/nova/quota.py index 3884eb308..6b52a97fa 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -35,6 +35,8 
@@ flags.DEFINE_integer('quota_gigabytes', 1000, 'number of volume gigabytes allowed per project') flags.DEFINE_integer('quota_floating_ips', 10, 'number of floating ips allowed per project') +flags.DEFINE_integer('quota_metadata_items', 128, + 'number of metadata items allowed per instance') def get_quota(context, project_id): @@ -42,7 +44,8 @@ def get_quota(context, project_id): 'cores': FLAGS.quota_cores, 'volumes': FLAGS.quota_volumes, 'gigabytes': FLAGS.quota_gigabytes, - 'floating_ips': FLAGS.quota_floating_ips} + 'floating_ips': FLAGS.quota_floating_ips, + 'metadata_items': FLAGS.quota_metadata_items} try: quota = db.quota_get(context, project_id) for key in rval.keys(): @@ -94,6 +97,15 @@ def allowed_floating_ips(context, num_floating_ips): return min(num_floating_ips, allowed_floating_ips) +def allowed_metadata_items(context, num_metadata_items): + """Check quota; return min(num_metadata_items,allowed_metadata_items)""" + project_id = context.project_id + context = context.elevated() + quota = get_quota(context, project_id) + num_allowed_metadata_items = quota['metadata_items'] + return min(num_metadata_items, num_allowed_metadata_items) + + class QuotaError(exception.ApiError): """Quota Exceeeded""" pass diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a7be0796e..7eb81c2b8 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -28,6 +28,7 @@ import nova.api.openstack from nova.api.openstack import servers import nova.db.api from nova.db.sqlalchemy.models import Instance +from nova.db.sqlalchemy.models import InstanceMetadata import nova.rpc from nova.tests.api.openstack import fakes @@ -64,6 +65,9 @@ def instance_address(context, instance_id): def stub_instance(id, user_id=1, private_address=None, public_addresses=None): + metadata = [] + metadata.append(InstanceMetadata(key='seq', value=id)) + if public_addresses == None: public_addresses = list() @@ 
-95,7 +99,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None): "availability_zone": "", "display_name": "server%s" % id, "display_description": "", - "locked": False} + "locked": False, + "metadata": metadata} instance["fixed_ip"] = { "address": private_address, @@ -214,7 +219,8 @@ class ServersTest(unittest.TestCase): "get_image_id_from_image_hash", image_id_from_hash) body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, + name='server_test', imageId=2, flavorId=2, + metadata={'hello': 'world', 'open': 'stack'}, personality={})) req = webob.Request.blank('/v1.0/servers') req.method = 'POST' @@ -291,6 +297,7 @@ class ServersTest(unittest.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['name'], 'server%d' % i) self.assertEqual(s['imageId'], 10) + self.assertEqual(s['metadata']['seq'], i) i += 1 def test_server_pause(self): diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 9548a8c13..36ccc273e 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -87,6 +87,18 @@ class QuotaTestCase(test.TestCase): num_instances = quota.allowed_instances(self.context, 100, instance_types.INSTANCE_TYPES['m1.small']) self.assertEqual(num_instances, 10) + + # metadata_items + too_many_items = FLAGS.quota_metadata_items + 1000 + num_metadata_items = quota.allowed_metadata_items(self.context, + too_many_items) + self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items) + db.quota_update(self.context, self.project.id, {'metadata_items': 5}) + num_metadata_items = quota.allowed_metadata_items(self.context, + too_many_items) + self.assertEqual(num_metadata_items, 5) + + # Cleanup db.quota_destroy(self.context, self.project.id) def test_too_many_instances(self): @@ -151,3 +163,15 @@ class QuotaTestCase(test.TestCase): self.assertRaises(quota.QuotaError, self.cloud.allocate_address, self.context) db.floating_ip_destroy(context.get_admin_context(), address) + + def 
test_too_many_metadata_items(self): + metadata = {} + for i in range(FLAGS.quota_metadata_items + 1): + metadata['key%s' % i] = 'value%s' % i + self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.context, + min_count=1, + max_count=1, + instance_type='m1.small', + image_id='fake', + metadata=metadata) diff --git a/run_tests.sh b/run_tests.sh index 4e21fe945..58e92c06b 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -73,7 +73,9 @@ fi if [ -z "$noseargs" ]; then - run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1 + srcfiles=`find bin -type f ! -name "nova.conf*"` + srcfiles+=" nova setup.py" + run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1 else run_tests fi -- cgit From f0d58ea141116ccfd1c977b0f3e5fc669c0ea8a9 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 17 Feb 2011 18:06:34 -0600 Subject: moved inject network info to a function which accepts only instance, and call it from reset network --- nova/virt/xenapi/vmops.py | 96 +++++++++++++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 41 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 842e08f22..d1ef95c6c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -104,46 +104,6 @@ class VMOps(object): instance, kernel, ramdisk, pv_kernel) VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) - # write network info - admin_context = context.get_admin_context() - - # TODO(tr3buchet) - remove comment in multi-nic - # I've decided to go ahead and consider multiple IPs and networks - # at this stage even though they aren't implemented because these will - # be needed for multi-nic and there was no sense writing it for single - # network/single IP and then having to turn around and re-write it - IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) - for network in 
db.network_get_all_by_instance(admin_context, - instance['id']): - network_IPs = [ip for ip in IPs if ip.network_id == network.id] - - def ip_dict(ip): - return {'netmask': network['netmask'], - 'enabled': '1', - 'ip': ip.address} - - mac_id = instance.mac_address.replace(':', '') - location = 'vm-data/networking/%s' % mac_id - mapping = {'label': network['label'], - 'gateway': network['gateway'], - 'mac': instance.mac_address, - 'dns': [network['dns']], - 'ips': [ip_dict(ip) for ip in network_IPs]} - self.write_to_param_xenstore(vm_ref, {location: mapping}) - - # TODO(tr3buchet) - remove comment in multi-nic - # this bit here about creating the vifs will be updated - # in multi-nic to handle multiple IPs on the same network - # and multiple networks - # for now it works as there is only one of each - bridge = network['bridge'] - network_ref = \ - NetworkHelper.find_network_with_bridge(self._session, bridge) - - if network_ref: - VMHelper.create_vif(self._session, vm_ref, - network_ref, instance.mac_address) - LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) instance_name = instance.name @@ -174,7 +134,7 @@ class VMOps(object): timer.f = _wait_for_boot - # call reset networking + # call to reset network to inject network info and configure self.reset_network(instance) return timer.start(interval=0.5, now=True) @@ -438,11 +398,65 @@ class VMOps(object): # TODO: implement this! 
return 'http://fakeajaxconsole/fake_url' + def inject_network_info(self, instance): + """ + Generate the network info and make calls to place it into the + xenstore and the xenstore param list + + """ + # TODO(tr3buchet) - remove comment in multi-nic + # I've decided to go ahead and consider multiple IPs and networks + # at this stage even though they aren't implemented because these will + # be needed for multi-nic and there was no sense writing it for single + # network/single IP and then having to turn around and re-write it + vm_opaque_ref = self._get_vm_opaque_ref(instance.id) + logging.debug(_("injecting network info to xenstore for vm: |%s|"), + vm_opaque_ref) + admin_context = context.get_admin_context() + IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) + for network in db.network_get_all_by_instance(admin_context, + instance['id']): + network_IPs = [ip for ip in IPs if ip.network_id == network.id] + + def ip_dict(ip): + return {'netmask': network['netmask'], + 'enabled': '1', + 'ip': ip.address} + + mac_id = instance.mac_address.replace(':', '') + location = 'vm-data/networking/%s' % mac_id + mapping = {'label': network['label'], + 'gateway': network['gateway'], + 'mac': instance.mac_address, + 'dns': [network['dns']], + 'ips': [ip_dict(ip) for ip in network_IPs]} + self.write_to_param_xenstore(vm_opaque_ref, {location: mapping}) + try: + self.write_to_xenstore(vm_opaque_ref, location, + mapping['location']) + except KeyError: + # catch KeyError for domid if instance isn't running + pass + + # TODO(tr3buchet) - remove comment in multi-nic + # this bit here about creating the vifs will be updated + # in multi-nic to handle multiple IPs on the same network + # and multiple networks + # for now it works as there is only one of each + bridge = network['bridge'] + network_ref = \ + NetworkHelper.find_network_with_bridge(self._session, bridge) + + if network_ref: + VMHelper.create_vif(self._session, vm_opaque_ref, + network_ref, 
instance.mac_address) + def reset_network(self, instance): """ Creates uuid arg to pass to make_agent_call and calls it. """ + self.inject_network_info(instance) args = {'id': str(uuid.uuid4())} resp = self._make_agent_call('resetnetwork', instance, '', args) -- cgit From 982ac6b348981fa26ef6b70b8673da45477a6b36 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 18 Feb 2011 10:24:55 +0100 Subject: Use WatchedFileHandler instead of RotatingFileHandler. --- nova/log.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/log.py b/nova/log.py index 87a6dd51b..6b201ffcc 100644 --- a/nova/log.py +++ b/nova/log.py @@ -94,7 +94,7 @@ critical = logging.critical log = logging.log # handlers StreamHandler = logging.StreamHandler -RotatingFileHandler = logging.handlers.RotatingFileHandler +WatchedFileHandler = logging.handlers.WatchedFileHandler # logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. SysLogHandler = logging.handlers.SysLogHandler @@ -139,7 +139,7 @@ def basicConfig(): logging.root.addHandler(syslog) logpath = get_log_file_path() if logpath: - logfile = RotatingFileHandler(logpath) + logfile = WatchedFileHandler(logpath) logfile.setFormatter(_formatter) logging.root.addHandler(logfile) -- cgit From debfca945627323c160b4ad9aa9b63b364deff99 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 18 Feb 2011 16:02:55 +0100 Subject: Switch to API_listen and API_listen_port, drop wsgi.paste_config_to_flags --- bin/nova-api | 18 ++++++++++++------ etc/nova-api.conf | 3 --- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index eb59d0191..e7ee6f6fe 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -47,6 +47,12 @@ FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'osapi'] +for api in API_ENDPOINTS: + flags.DEFINE_string("%s_api_listen" % api, "0.0.0.0", + "IP address to listen to for API %s" % api) + flags.DEFINE_integer("%s_api_listen_port" % api, + getattr(FLAGS, "%s_port" % api), + 
"Port to listen to for API %s" % api) def run_app(paste_config_file): LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file) @@ -57,14 +63,10 @@ def run_app(paste_config_file): LOG.debug(_("No paste configuration for app: %s"), api) continue LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) - wsgi.paste_config_to_flags(config, { - "verbose": FLAGS.verbose, - "%s_host" % api: getattr(FLAGS, "%s_host" % api), - "%s_port" % api: getattr(FLAGS, "%s_port" % api)}) LOG.info(_("Running %s API"), api) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_port" % api), - config.get('listen', '0.0.0.0'))) + apps.append((app, getattr(FLAGS, "%s_api_listen_port" % api), + getattr(FLAGS, "%s_api_listen" % api))) if len(apps) == 0: LOG.error(_("No known API applications configured in %s."), paste_config_file) @@ -82,6 +84,10 @@ if __name__ == '__main__': FLAGS(sys.argv) LOG.audit(_("Starting nova-api node (version %s)"), version.version_string_with_vcs()) + LOG.debug(_("Full set of FLAGS:")) + for flag in FLAGS: + flag_get = FLAGS.get(flag, None) + LOG.debug("%(flag)s : %(flag_get)s" % locals()) conf = wsgi.paste_config_file('nova-api.conf') if conf: run_app(conf) diff --git a/etc/nova-api.conf b/etc/nova-api.conf index f0e749805..9f7e93d4c 100644 --- a/etc/nova-api.conf +++ b/etc/nova-api.conf @@ -1,6 +1,3 @@ -[DEFAULT] -verbose = 1 - ####### # EC2 # ####### -- cgit From a0145eed239a7afb545def17f25a08e8e4c68824 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 18 Feb 2011 16:08:33 +0100 Subject: Set up logging once FLAGS properly read, no need to redo logging config anymore (was inoperant anyway) --- bin/nova-api | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index e7ee6f6fe..46f695248 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -39,10 +39,6 @@ from nova import log as logging from nova import version from nova import wsgi -logging.basicConfig() -LOG = 
logging.getLogger('nova.api') -LOG.setLevel(logging.DEBUG) - FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'osapi'] @@ -72,8 +68,6 @@ def run_app(paste_config_file): paste_config_file) return - # NOTE(todd): redo logging config, verbose could be set in paste config - logging.basicConfig() server = wsgi.Server() for app in apps: server.start(*app) @@ -82,6 +76,8 @@ def run_app(paste_config_file): if __name__ == '__main__': FLAGS(sys.argv) + logging.basicConfig() + LOG = logging.getLogger('nova.api') LOG.audit(_("Starting nova-api node (version %s)"), version.version_string_with_vcs()) LOG.debug(_("Full set of FLAGS:")) -- cgit From 27c2de313a41bced77f7a4769deae089a70f5385 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 18 Feb 2011 16:21:14 +0100 Subject: Port changes to nova-combined, rename flags to API_listen and API_listen_port --- bin/nova-api | 8 ++++---- bin/nova-combined | 20 ++++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 46f695248..8d47a656e 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -44,9 +44,9 @@ FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'osapi'] for api in API_ENDPOINTS: - flags.DEFINE_string("%s_api_listen" % api, "0.0.0.0", + flags.DEFINE_string("%s_listen" % api, "0.0.0.0", "IP address to listen to for API %s" % api) - flags.DEFINE_integer("%s_api_listen_port" % api, + flags.DEFINE_integer("%s_listen_port" % api, getattr(FLAGS, "%s_port" % api), "Port to listen to for API %s" % api) @@ -61,8 +61,8 @@ def run_app(paste_config_file): LOG.debug(_("App Config: %(api)s\n%(config)r") % locals()) LOG.info(_("Running %s API"), api) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_api_listen_port" % api), - getattr(FLAGS, "%s_api_listen" % api))) + apps.append((app, getattr(FLAGS, "%s_listen_port" % api), + getattr(FLAGS, "%s_listen" % api))) if len(apps) == 0: LOG.error(_("No known API applications configured in %s."), 
paste_config_file) diff --git a/bin/nova-combined b/bin/nova-combined index 889600eb7..40dc2945d 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -45,6 +45,14 @@ from nova import wsgi FLAGS = flags.FLAGS +API_ENDPOINTS = ['ec2', 'osapi'] + +for api in API_ENDPOINTS: + flags.DEFINE_string("%s_listen" % api, "0.0.0.0", + "IP address to listen to for API %s" % api) + flags.DEFINE_integer("%s_listen_port" % api, + getattr(FLAGS, "%s_port" % api), + "Port to listen to for API %s" % api) if __name__ == '__main__': utils.default_flagfile() @@ -57,21 +65,17 @@ if __name__ == '__main__': scheduler = service.Service.create(binary='nova-scheduler') #objectstore = service.Service.create(binary='nova-objectstore') - service.serve(compute, network, volume, scheduler) +# service.serve(compute, network, volume, scheduler) apps = [] paste_config_file = wsgi.paste_config_file('nova-api.conf') - for api in ['osapi', 'ec2']: + for api in API_ENDPOINTS: config = wsgi.load_paste_configuration(paste_config_file, api) if config is None: continue - wsgi.paste_config_to_flags(config, { - "verbose": FLAGS.verbose, - "%s_host" % api: getattr(FLAGS, "%s_host" % api), - "%s_port" % api: getattr(FLAGS, "%s_port" % api)}) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_port" % api), - config.get('listen', '0.0.0.0'))) + apps.append((app, getattr(FLAGS, "%s_listen_port" % api), + getattr(FLAGS, "%s_listen" % api))) if len(apps) > 0: logging.basicConfig() server = wsgi.Server() -- cgit From a02af158f781b61dba67c454afb59b34f27ca5d7 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 18 Feb 2011 16:23:15 +0100 Subject: Remove paste_config_to_flags since it's now unused --- nova/wsgi.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index e01cc1e1e..d9b1cae86 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -515,10 +515,3 @@ def load_paste_app(filename, appname): except LookupError: pass return app - - -def 
paste_config_to_flags(config, mixins): - for k, v in mixins.iteritems(): - value = config.get(k, v) - converted_value = FLAGS[k].parser.Parse(value) - setattr(FLAGS, k, converted_value) -- cgit From 15d140992a980d959be6633b59d178a5ea273cdc Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 18 Feb 2011 16:23:52 +0100 Subject: PEP8 fix --- bin/nova-api | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/nova-api b/bin/nova-api index 8d47a656e..1228f723c 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -50,6 +50,7 @@ for api in API_ENDPOINTS: getattr(FLAGS, "%s_port" % api), "Port to listen to for API %s" % api) + def run_app(paste_config_file): LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file) apps = [] -- cgit From bf570ca5f199091d505d96b91a3dc3acfbfc9fc7 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 18 Feb 2011 16:37:00 +0100 Subject: Fixed testing mode leftover --- bin/nova-combined | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-combined b/bin/nova-combined index 40dc2945d..dd21c8df4 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -65,7 +65,7 @@ if __name__ == '__main__': scheduler = service.Service.create(binary='nova-scheduler') #objectstore = service.Service.create(binary='nova-objectstore') -# service.serve(compute, network, volume, scheduler) + service.serve(compute, network, volume, scheduler) apps = [] paste_config_file = wsgi.paste_config_file('nova-api.conf') -- cgit From bef44d7621db516a0f5d407655f5e76adfd5c06d Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 18 Feb 2011 10:14:56 -0800 Subject: Rename migration 004 => 005 --- .../versions/004_add_instance_metadata.py | 78 ---------------------- .../versions/005_add_instance_metadata.py | 78 ++++++++++++++++++++++ 2 files changed, 78 insertions(+), 78 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py create mode 100644 
nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py deleted file mode 100644 index 4cb07e0d8..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_instance_metadata.py +++ /dev/null @@ -1,78 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging - - -meta = MetaData() - - -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of instances or services. 
-instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -quotas = Table('quotas', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - - -# -# New Tables -# - -instance_metadata_table = Table('instance_metadata', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('instance_id', - Integer(), - ForeignKey('instances.id'), - nullable=False), - Column('key', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('value', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False))) - - -# -# New columns -# -quota_metadata_items = Column('metadata_items', Integer()) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - for table in (instance_metadata_table, ): - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise - - quotas.create_column(quota_metadata_items) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py new file mode 100644 index 000000000..4cb07e0d8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +quotas = Table('quotas', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# + +instance_metadata_table = Table('instance_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +# +# New columns +# +quota_metadata_items = Column('metadata_items', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (instance_metadata_table, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + quotas.create_column(quota_metadata_items) -- cgit From f66840390bf6f4660a091663d306417e760735bc Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 18 Feb 2011 10:57:19 -0800 Subject: Support for HP SAN --- .../versions/006_add_provider_data_to_volumes.py | 72 ++++++ nova/db/sqlalchemy/models.py | 3 + nova/volume/driver.py | 144 +++++++++-- nova/volume/manager.py | 8 +- nova/volume/san.py | 287 ++++++++++++++++++--- 5 files changed, 459 insertions(+), 55 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py new file mode 100644 index 000000000..705fc8ff3 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# +# None + +# +# Tables to alter +# +# None + +# +# Columns to add to existing tables +# + +volumes_provider_location = Column('provider_location', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + +volumes_provider_auth = Column('provider_auth', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(volumes_provider_location) + volumes.create_column(volumes_provider_auth) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 40a96fc17..4485ee9e4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -243,6 +243,9 @@ class Volume(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + class Quota(BASE, NovaBase): """Represents quota overrides for a project.""" diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 82f4c2f54..f172e2fdc 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -21,6 +21,7 @@ Drivers for volumes. 
""" import time +import os from nova import exception from nova import flags @@ -36,6 +37,8 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') flags.DEFINE_string('num_shell_tries', 3, 'number of times to attempt to run flakey shell commands') +flags.DEFINE_string('num_iscsi_scan_tries', 3, + 'number of times to rescan iSCSI target to find volume') flags.DEFINE_integer('num_shelves', 100, 'Number of vblade shelves') @@ -294,40 +297,133 @@ class ISCSIDriver(VolumeDriver): self._execute("sudo ietadm --op delete --tid=%s" % iscsi_target) - def _get_name_and_portal(self, volume): - """Gets iscsi name and portal from volume name and host.""" + def _do_iscsi_discovery(self, volume): + #TODO(justinsb): Deprecate discovery and use stored info + #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) + LOG.warn(_("ISCSI provider_location not stored, using discovery")) + volume_name = volume['name'] - host = volume['host'] + (out, _err) = self._execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % host) + "sendtargets -p %s" % (volume['host'])) for target in out.splitlines(): if FLAGS.iscsi_ip_prefix in target and volume_name in target: - (location, _sep, iscsi_name) = target.partition(" ") - break - iscsi_portal = location.split(",")[0] - return (iscsi_name, iscsi_portal) + return target + return None + + def _get_iscsi_properties(self, volume): + """Gets iscsi configuration, ideally from saved information in the + volume entity, but falling back to discovery if need be.""" + + properties = {} + + location = volume['provider_location'] + + if location: + # provider_location is the same format as iSCSI discovery output + properties['target_discovered'] = False + else: + location = self._do_iscsi_discovery(volume) + + if not location: + raise exception.Error(_("Could not find iSCSI export " + " for volume %s") % + (volume['name'])) + + LOG.debug(_("ISCSI Discovery: Found %s") % (location)) + 
properties['target_discovered'] = True + + (iscsi_target, _sep, iscsi_name) = location.partition(" ") + + iscsi_portal = iscsi_target.split(",")[0] + + properties['target_iqn'] = iscsi_name + properties['target_portal'] = iscsi_portal + + auth = volume['provider_auth'] + + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return properties + + def _run_iscsiadm(self, iscsi_properties, iscsi_command): + command = ("sudo iscsiadm -m node -T %s -p %s %s" % + (iscsi_properties['target_iqn'], + iscsi_properties['target_portal'], + iscsi_command)) + (out, err) = self._execute(command) + LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % + (iscsi_command, out, err)) + return (out, err) + + def _iscsiadm_update(self, iscsi_properties, property_key, property_value): + iscsi_command = ("--op update -n %s -v %s" % + (property_key, property_value)) + return self._run_iscsiadm(iscsi_properties, iscsi_command) def discover_volume(self, volume): """Discover volume on a remote host.""" - iscsi_name, iscsi_portal = self._get_name_and_portal(volume) - self._execute("sudo iscsiadm -m node -T %s -p %s --login" % - (iscsi_name, iscsi_portal)) - self._execute("sudo iscsiadm -m node -T %s -p %s --op update " - "-n node.startup -v automatic" % - (iscsi_name, iscsi_portal)) - return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal, - iscsi_name) + iscsi_properties = self._get_iscsi_properties(volume) + + if not iscsi_properties['target_discovered']: + self._run_iscsiadm(iscsi_properties, "--op new") + + if iscsi_properties.get('auth_method'): + self._iscsiadm_update(iscsi_properties, + "node.session.auth.authmethod", + iscsi_properties['auth_method']) + self._iscsiadm_update(iscsi_properties, + "node.session.auth.username", + iscsi_properties['auth_username']) + self._iscsiadm_update(iscsi_properties, + "node.session.auth.password", 
+ iscsi_properties['auth_password']) + + self._run_iscsiadm(iscsi_properties, "--login") + + self._iscsiadm_update(iscsi_properties, "node.startup", "automatic") + + mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % + (iscsi_properties['target_portal'], + iscsi_properties['target_iqn'])) + + # The /dev/disk/by-path/... node is not always present immediately + # TODO(justinsb): This retry-with-delay is a pattern, move to utils? + tries = 0 + while not os.path.exists(mount_device): + if tries >= FLAGS.num_iscsi_scan_tries: + raise exception.Error(_("iSCSI device not found at %s") % + (mount_device)) + + LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. " + "Will rescan & retry. Try number: %(tries)s") % + locals()) + + # The rescan isn't documented as being necessary(?), but it helps + self._run_iscsiadm(iscsi_properties, "--rescan") + + tries = tries + 1 + if not os.path.exists(mount_device): + time.sleep(tries ** 2) + + if tries != 0: + LOG.debug(_("Found iSCSI node %(mount_device)s " + "(after %(tries)s rescans)") % + locals()) + + return mount_device def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" - iscsi_name, iscsi_portal = self._get_name_and_portal(volume) - self._execute("sudo iscsiadm -m node -T %s -p %s --op update " - "-n node.startup -v manual" % - (iscsi_name, iscsi_portal)) - self._execute("sudo iscsiadm -m node -T %s -p %s --logout " % - (iscsi_name, iscsi_portal)) - self._execute("sudo iscsiadm -m node --op delete " - "--targetname %s" % iscsi_name) + iscsi_properties = self._get_iscsi_properties(volume) + self._iscsiadm_update(iscsi_properties, "node.startup", "manual") + self._run_iscsiadm(iscsi_properties, "--logout") + self._run_iscsiadm(iscsi_properties, "--op delete") class FakeISCSIDriver(ISCSIDriver): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index d2f02e4e0..7193ece14 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -107,10 +107,14 @@ class 
VolumeManager(manager.Manager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - self.driver.create_volume(volume_ref) + db_update = self.driver.create_volume(volume_ref) + if db_update: + self.db.volume_update(context, volume_ref['id'], db_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - self.driver.create_export(context, volume_ref) + db_update = self.driver.create_export(context, volume_ref) + if db_update: + self.db.volume_update(context, volume_ref['id'], db_update) except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) diff --git a/nova/volume/san.py b/nova/volume/san.py index 26d6125e7..911ad096f 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -23,6 +23,8 @@ The unique thing about a SAN is that we don't expect that we can run the volume import os import paramiko +from xml.etree import ElementTree + from nova import exception from nova import flags from nova import log as logging @@ -41,37 +43,15 @@ flags.DEFINE_string('san_password', '', 'Password for SAN controller') flags.DEFINE_string('san_privatekey', '', 'Filename of private key to use for SSH authentication') +flags.DEFINE_string('san_clustername', '', + 'Cluster name to use for creating volumes') +flags.DEFINE_integer('san_ssh_port', 22, + 'SSH port to use with SAN') class SanISCSIDriver(ISCSIDriver): """ Base class for SAN-style storage volumes (storage providers we access over SSH)""" - #Override because SAN ip != host ip - def _get_name_and_portal(self, volume): - """Gets iscsi name and portal from volume name and host.""" - volume_name = volume['name'] - - # TODO(justinsb): store in volume, remerge with generic iSCSI code - host = FLAGS.san_ip - - (out, _err) = self._execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % host) - - location = None - find_iscsi_name = self._build_iscsi_target_name(volume) - for target in out.splitlines(): - if 
find_iscsi_name in target: - (location, _sep, iscsi_name) = target.partition(" ") - break - if not location: - raise exception.Error(_("Could not find iSCSI export " - " for volume %s") % - volume_name) - - iscsi_portal = location.split(",")[0] - LOG.debug("iscsi_name=%s, iscsi_portal=%s" % - (iscsi_name, iscsi_portal)) - return (iscsi_name, iscsi_portal) def _build_iscsi_target_name(self, volume): return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) @@ -85,6 +65,7 @@ class SanISCSIDriver(ISCSIDriver): ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if FLAGS.san_password: ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, username=FLAGS.san_login, password=FLAGS.san_password) elif FLAGS.san_privatekey: @@ -92,10 +73,11 @@ class SanISCSIDriver(ISCSIDriver): # It sucks that paramiko doesn't support DSA keys privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, username=FLAGS.san_login, pkey=privatekey) else: - raise exception.Error("Specify san_password or san_privatekey") + raise exception.Error(_("Specify san_password or san_privatekey")) return ssh def _run_ssh(self, command, check_exit_code=True): @@ -124,10 +106,10 @@ class SanISCSIDriver(ISCSIDriver): def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" if not (FLAGS.san_password or FLAGS.san_privatekey): - raise exception.Error("Specify san_password or san_privatekey") + raise exception.Error(_("Specify san_password or san_privatekey")) if not (FLAGS.san_ip): - raise exception.Error("san_ip must be set") + raise exception.Error(_("san_ip must be set")) def _collect_lines(data): @@ -306,6 +288,17 @@ class SolarisISCSIDriver(SanISCSIDriver): self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" % (target_group_name, luid)) + #TODO(justinsb): Is this always 1? Does it matter? 
+ iscsi_portal_interface = '1' + iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface + + db_update = {} + db_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_name)) + + return db_update + def remove_export(self, context, volume): """Removes an export for a logical volume.""" @@ -333,3 +326,239 @@ class SolarisISCSIDriver(SanISCSIDriver): if self._is_lu_created(volume): self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" % (luid)) + + +class HpSanISCSIDriver(SanISCSIDriver): + """Executes commands relating to HP/Lefthand SAN ISCSI volumes. + We use the CLIQ interface, over SSH. + + Rough overview of CLIQ commands used: + CLIQ createVolume (creates the volume) + CLIQ getVolumeInfo (to discover the IQN etc) + CLIQ getClusterInfo (to discover the iSCSI target IP address) + CLIQ assignVolumeChap (exports it with CHAP security) + + The 'trick' here is that the HP SAN enforces security by default, so + normally a volume mount would need both to configure the SAN in the volume + layer and do the mount on the compute layer. Multi-layer operations are + not catered for at the moment in the nova architecture, so instead we + share the volume using CHAP at volume creation time. 
Then the mount need + only use those CHAP credentials, so can take place exclusively in the + compute layer""" + + def _cliq_run(self, verb, cliq_args): + """Runs a CLIQ command over SSH, without doing any result parsing""" + cliq_arg_strings = [] + for k, v in cliq_args.items(): + cliq_arg_strings.append(" %s=%s" % (k, v)) + cmd = verb + ''.join(cliq_arg_strings) + + return self._run_ssh(cmd) + + def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True): + """Runs a CLIQ command over SSH, parsing and checking the output""" + cliq_args['output'] = 'XML' + (out, _err) = self._cliq_run(verb, cliq_args) + + LOG.debug(_("CLIQ command returned %s"), out) + + result_xml = ElementTree.fromstring(out) + if check_cliq_result: + response_node = result_xml.find("response") + if response_node is None: + msg = (_("Malformed response to CLIQ command " + "%(verb)s %(cliq_args)s. Result=%(out)s") % + locals()) + raise exception.Error(msg) + + result_code = response_node.attrib.get("result") + + if result_code != "0": + msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. " + " Result=%(out)s") % + locals()) + raise exception.Error(msg) + + return result_xml + + def _cliq_get_cluster_info(self, cluster_name): + """Queries for info about the cluster (including IP)""" + cliq_args = {} + cliq_args['clusterName'] = cluster_name + cliq_args['searchDepth'] = '1' + cliq_args['verbose'] = '0' + + result_xml = self._cliq_run_xml("getClusterInfo", cliq_args) + + return result_xml + + def _cliq_get_cluster_vip(self, cluster_name): + """Gets the IP on which a cluster shares iSCSI volumes""" + cluster_xml = self._cliq_get_cluster_info(cluster_name) + + vips = [] + for vip in cluster_xml.findall("response/cluster/vip"): + vips.append(vip.attrib.get('ipAddress')) + + if len(vips) == 1: + return vips[0] + + _xml = ElementTree.tostring(cluster_xml) + msg = (_("Unexpected number of virtual ips for cluster " + " %(cluster_name)s. 
Result=%(_xml)s") % + locals()) + raise exception.Error(msg) + + def _cliq_get_volume_info(self, volume_name): + """Gets the volume info, including IQN""" + cliq_args = {} + cliq_args['volumeName'] = volume_name + result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args) + + # Result looks like this: + # + # + # + # + # + # + # + # + + # Flatten the nodes into a dictionary; use prefixes to avoid collisions + volume_attributes = {} + + volume_node = result_xml.find("response/volume") + for k, v in volume_node.attrib.items(): + volume_attributes["volume." + k] = v + + status_node = volume_node.find("status") + if not status_node is None: + for k, v in status_node.attrib.items(): + volume_attributes["status." + k] = v + + # We only consider the first permission node + permission_node = volume_node.find("permission") + if not permission_node is None: + for k, v in status_node.attrib.items(): + volume_attributes["permission." + k] = v + + LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") % + locals()) + return volume_attributes + + def create_volume(self, volume): + """Creates a volume.""" + cliq_args = {} + cliq_args['clusterName'] = FLAGS.san_clustername + #TODO(justinsb): Should we default to inheriting thinProvision? + cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0' + cliq_args['volumeName'] = volume['name'] + if int(volume['size']) == 0: + cliq_args['size'] = '100MB' + else: + cliq_args['size'] = '%sGB' % volume['size'] + + self._cliq_run_xml("createVolume", cliq_args) + + volume_info = self._cliq_get_volume_info(volume['name']) + cluster_name = volume_info['volume.clusterName'] + iscsi_iqn = volume_info['volume.iscsiIqn'] + + #TODO(justinsb): Is this always 1? Does it matter? 
+ cluster_interface = '1' + + cluster_vip = self._cliq_get_cluster_vip(cluster_name) + iscsi_portal = cluster_vip + ":3260," + cluster_interface + + db_update = {} + db_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_iqn)) + + return db_update + + def delete_volume(self, volume): + """Deletes a volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['prompt'] = 'false' # Don't confirm + + self._cliq_run_xml("deleteVolume", cliq_args) + + def local_path(self, volume): + # TODO(justinsb): Is this needed here? + raise exception.Error(_("local_path not supported")) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + return self._do_export(context, volume, force_create=False) + + def create_export(self, context, volume): + return self._do_export(context, volume, force_create=True) + + def _do_export(self, context, volume, force_create): + """Supports ensure_export and create_export""" + volume_info = self._cliq_get_volume_info(volume['name']) + + is_shared = 'permission.authGroup' in volume_info + + db_update = {} + + should_export = False + + if force_create or not is_shared: + should_export = True + # Check that we have a project_id + project_id = volume['project_id'] + if not project_id: + project_id = context.project_id + + if project_id: + #TODO(justinsb): Use a real per-project password here + chap_username = 'proj_' + project_id + # HP/Lefthand requires that the password be >= 12 characters + chap_password = 'project_secret_' + project_id + else: + msg = (_("Could not determine project for volume %s, " + "can't export") % + (volume['name'])) + if force_create: + raise exception.Error(msg) + else: + LOG.warn(msg) + should_export = False + + if should_export: + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + cliq_args['chapName'] = chap_username + cliq_args['targetSecret'] = chap_password + + self._cliq_run_xml("assignVolumeChap", cliq_args) + + 
db_update['provider_auth'] = ("CHAP %s %s" % + (chap_username, chap_password)) + + return db_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + cliq_args = {} + cliq_args['volumeName'] = volume['name'] + + self._cliq_run_xml("unassignVolume", cliq_args) -- cgit From fa29dc0433384d5aa47f5ac069a8dc650e23ccae Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 18 Feb 2011 15:48:49 -0600 Subject: moved creating vifs to its own function, moved inject network to its own function --- nova/virt/xenapi/vmops.py | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d1ef95c6c..76b88a8bd 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -135,6 +135,8 @@ class VMOps(object): timer.f = _wait_for_boot # call to reset network to inject network info and configure + networks = self.inject_network_info(instance) + self.create_vifs(instance, networks) self.reset_network(instance) return timer.start(interval=0.5, now=True) @@ -414,8 +416,9 @@ class VMOps(object): vm_opaque_ref) admin_context = context.get_admin_context() IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) - for network in db.network_get_all_by_instance(admin_context, - instance['id']): + networks = db.network_get_all_by_instance(admin_context, + instance['id']) + for network in networks: network_IPs = [ip for ip in IPs if ip.network_id == network.id] def ip_dict(ip): @@ -438,11 +441,23 @@ class VMOps(object): # catch KeyError for domid if instance isn't running pass - # TODO(tr3buchet) - remove comment in multi-nic - # this bit here about creating the vifs will be updated - # in multi-nic to handle multiple IPs on the same network - # and multiple networks - # for now it works as there is only one of each + return networks + + def create_vifs(self, instance, networks=None): + """ + Creates vifs for an instance + + """ + 
logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref) + if networks is None: + networks = db.network_get_all_by_instance(admin_context, + instance['id']) + # TODO(tr3buchet) - remove comment in multi-nic + # this bit here about creating the vifs will be updated + # in multi-nic to handle multiple IPs on the same network + # and multiple networks + # for now it works as there is only one of each + for network in networks: bridge = network['bridge'] network_ref = \ NetworkHelper.find_network_with_bridge(self._session, bridge) @@ -456,7 +471,6 @@ class VMOps(object): Creates uuid arg to pass to make_agent_call and calls it. """ - self.inject_network_info(instance) args = {'id': str(uuid.uuid4())} resp = self._make_agent_call('resetnetwork', instance, '', args) -- cgit From be898f26984da4ee92da1a027e47775cd816fed5 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 18 Feb 2011 14:35:12 -0800 Subject: Fix typo (?) in authentication logic --- nova/api/openstack/auth.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 473071738..0b6ef8fc5 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -121,8 +121,8 @@ class AuthMiddleware(wsgi.Middleware): req - webob.Request object """ ctxt = context.get_admin_context() - user = self.auth.get_user_from_access_key(key) - if user and user.name == username: + user = self.auth.get_user_from_access_key(username) + if user and user.secret == key: token_hash = hashlib.sha1('%s%s%f' % (username, key, time.time())).hexdigest() token_dict = {} -- cgit From b4c67400324df02480b171b84ba73cfe8a6d044e Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 18 Feb 2011 14:36:01 -0800 Subject: If there are no keypairs, output a useful error message --- nova/api/openstack/servers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py 
index 486eca508..ce9601ecb 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -162,8 +162,12 @@ class Controller(wsgi.Controller): if not env: return faults.Fault(exc.HTTPUnprocessableEntity()) - key_pair = auth_manager.AuthManager.get_key_pairs( - req.environ['nova.context'])[0] + key_pairs = auth_manager.AuthManager.get_key_pairs( + req.environ['nova.context']) + if not key_pairs: + raise exception.NotFound(_("No keypairs defined")) + key_pair = key_pairs[0] + image_id = common.get_image_id_from_image_hash(self._image_service, req.environ['nova.context'], env['server']['imageId']) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( -- cgit From e369f2842446876505ce528c5bb56a3d41215f8f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 18 Feb 2011 16:42:26 -0600 Subject: added admin api call for injecting network info, added api test for inject network info --- nova/api/openstack/__init__.py | 1 + nova/api/openstack/servers.py | 14 ++++++++++++++ nova/compute/api.py | 7 +++++++ nova/compute/manager.py | 12 ++++++++++++ nova/tests/api/openstack/test_servers.py | 12 ++++++++++++ nova/virt/xenapi_conn.py | 4 ++++ 6 files changed, 50 insertions(+) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index dc3738d4a..cfa2da486 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -80,6 +80,7 @@ class APIRouter(wsgi.Router): server_members['suspend'] = 'POST' server_members['resume'] = 'POST' server_members['reset_network'] = 'POST' + server_members['inject_network_info'] = 'POST' mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 33cc3bbde..55fdb765b 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -263,6 +263,20 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return 
exc.HTTPAccepted() + def inject_network_info(self, req, id): + """ + Inject network info for an instance (admin only). + + """ + context = req.environ['nova.context'] + try: + self.compute_api.inject_network_info(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::inject_network_info %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] diff --git a/nova/compute/api.py b/nova/compute/api.py index ed6f0e34a..3e5cd495e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -473,6 +473,13 @@ class API(base.Base): """ self._cast_compute_message('reset_network', context, instance_id) + def inject_network_info(self, context, instance_id): + """ + Inject network info for the instance. + + """ + self._cast_compute_message('inject_network_info', context, instance_id) + def attach_volume(self, context, instance_id, volume_id, device): if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. " diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6fab1a41c..ae3ab519f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -510,6 +510,18 @@ class ComputeManager(manager.Manager): context=context) self.driver.reset_network(instance_ref) + @checks_instance_lock + def inject_network_info(self, context, instance_id): + """ + Inject network info for the instance. 
+ + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + LOG.debug(_('instance %s: inject network info'), instance_id, + context=context) + self.driver.inject_network_info(instance_ref) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 89e192eed..58da12dcc 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -293,6 +293,18 @@ class ServersTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) + def test_server_inject_network_info(self): + FLAGS.allow_admin_api = True + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/inject_network_info') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + def test_server_diagnostics(self): req = webob.Request.blank("/v1.0/servers/1/diagnostics") req.method = "GET" diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 2720d175f..f72a12380 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -192,6 +192,10 @@ class XenAPIConnection(object): """reset networking for specified instance""" self._vmops.reset_network(instance) + def inject_network_info(self, instance): + """inject network info for specified instance""" + self._vmops.inject_network_info(instance) + def get_info(self, instance_id): """Return data about VM instance""" return self._vmops.get_info(instance_id) -- cgit From 8684eb3aa638883ea82bbaf8eb59076f1d7e6a05 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 18 Feb 2011 17:17:51 -0800 Subject: ObjectStore doesn't use properties 
collection; kernel_id and ramdisk_id aren't required anyway --- nova/api/openstack/servers.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 486eca508..11a84687d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,13 +144,11 @@ class Controller(wsgi.Controller): metadata stored in Glance as 'image_properties' """ def lookup(param): - _image_id = image_id - try: - return image['properties'][param] - except KeyError: - raise exception.NotFound( - _("%(param)s property not found for image %(_image_id)s") % - locals()) + properties = image.get('properties') + if properties: + return properties.get(param) + else: + return image.get(param) image_id = str(image_id) image = self._image_service.show(req.environ['nova.context'], image_id) -- cgit From aeab8eeb038ca1d1dde05705028144a78552c4f7 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 18 Feb 2011 17:27:25 -0800 Subject: Don't crash if there's no 'fixed_ip' attribute (was returning None, which was unsubscriptable) --- nova/api/openstack/servers.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 486eca508..b54e28c0c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -63,20 +63,22 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], private=[]) - # grab single private fixed ip - try: - private_ip = inst['fixed_ip']['address'] - if private_ip: - inst_dict['addresses']['private'].append(private_ip) - except KeyError: - LOG.debug(_("Failed to read private ip")) - - # grab all public floating ips - try: - for floating in inst['fixed_ip']['floating_ips']: - inst_dict['addresses']['public'].append(floating['address']) - except KeyError: - 
LOG.debug(_("Failed to read public ip(s)")) + fixed_ip = inst['fixed_ip'] + if fixed_ip: + # grab single private fixed ip + try: + private_ip = fixed_ip['address'] + if private_ip: + inst_dict['addresses']['private'].append(private_ip) + except KeyError: + LOG.debug(_("Failed to read private ip")) + + # grab all public floating ips + try: + for floating in fixed_ip['floating_ips']: + inst_dict['addresses']['public'].append(floating['address']) + except KeyError: + LOG.debug(_("Failed to read public ip(s)")) inst_dict['metadata'] = {} inst_dict['hostId'] = '' -- cgit From e21567404aa31c39bf1b14b8a8b2f02703fd5905 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 18 Feb 2011 21:00:58 -0800 Subject: remove the weird is_vpn logic in compute/api.py --- nova/compute/api.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 0d2690c72..81ea6dc53 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -100,25 +100,23 @@ class API(base.Base): "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernel_id', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdisk_id', None) - # No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - LOG.debug(_("Creating a raw instance")) - # Make sure we have access to kernel and ramdisk (if not raw) - logging.debug("Using Kernel=%s, Ramdisk=%s" % - (kernel_id, ramdisk_id)) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) + image = self.image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernel_id', None) + if ramdisk_id is None: + ramdisk_id = 
image.get('ramdisk_id', None) + # No kernel and ramdisk for raw images + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + LOG.debug(_("Creating a raw instance")) + # Make sure we have access to kernel and ramdisk (if not raw) + logging.debug("Using Kernel=%s, Ramdisk=%s" % + (kernel_id, ramdisk_id)) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] -- cgit From e518ab4d16ec6166c0ea391af4c94aaf4d8aa2db Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 18 Feb 2011 22:49:13 -0800 Subject: replace context.user.is_admin() with context.is_admin because it is much faster --- nova/api/ec2/cloud.py | 12 ++++++------ nova/objectstore/bucket.py | 2 +- nova/objectstore/image.py | 2 +- nova/volume/api.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6919cd8d2..3b8c60c31 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -290,7 +290,7 @@ class CloudController(object): for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix - if context.user.is_admin() or \ + if context..is_admin or \ not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], @@ -318,7 +318,7 @@ class CloudController(object): def describe_security_groups(self, context, group_name=None, **kwargs): self.compute_api.ensure_default_security_group(context) - if context.user.is_admin(): + if context..is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, @@ -674,7 +674,7 @@ class CloudController(object): else: instances = self.compute_api.get_all(context, **kwargs) for instance in instances: - if not context.user.is_admin(): + if not context..is_admin: if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} @@ -702,7 +702,7 @@ class 
CloudController(object): i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] - if context.user.is_admin(): + if context..is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) @@ -736,7 +736,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - if context.user.is_admin(): + if context..is_admin: iterator = db.floating_ip_get_all(context) else: iterator = db.floating_ip_get_all_by_project(context, @@ -750,7 +750,7 @@ class CloudController(object): ec2_id = id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} - if context.user.is_admin(): + if context..is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index 82767e52f..b213e18e8 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -107,7 +107,7 @@ class Bucket(object): def is_authorized(self, context): try: - return context.user.is_admin() or \ + return context.is_admin or \ self.owner_id == context.project_id except Exception, e: return False diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index 41e0abd80..27227e2ca 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -69,7 +69,7 @@ class Image(object): # but only modified by admin or owner. 
try: return (self.metadata['isPublic'] and readonly) or \ - context.user.is_admin() or \ + context.is_admin or \ self.metadata['imageOwnerId'] == context.project_id except: return False diff --git a/nova/volume/api.py b/nova/volume/api.py index 478c83486..5201c7d90 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -85,7 +85,7 @@ class API(base.Base): return self.db.volume_get(context, volume_id) def get_all(self, context): - if context.user.is_admin(): + if context.is_admin: return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) -- cgit From c4a0f200b023ba96024d58bf731307483dcbe288 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 18 Feb 2011 23:00:28 -0800 Subject: remove extra . --- nova/api/ec2/cloud.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 3b8c60c31..05cdcc57e 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -290,7 +290,7 @@ class CloudController(object): for key_pair in key_pairs: # filter out the vpn keys suffix = FLAGS.vpn_key_suffix - if context..is_admin or \ + if context.is_admin or \ not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], @@ -318,7 +318,7 @@ class CloudController(object): def describe_security_groups(self, context, group_name=None, **kwargs): self.compute_api.ensure_default_security_group(context) - if context..is_admin: + if context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, @@ -674,7 +674,7 @@ class CloudController(object): else: instances = self.compute_api.get_all(context, **kwargs) for instance in instances: - if not context..is_admin: + if not context.is_admin: if instance['image_id'] == FLAGS.vpn_image_id: continue i = {} @@ -702,7 +702,7 @@ class CloudController(object): i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = 
instance['key_name'] - if context..is_admin: + if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) @@ -736,7 +736,7 @@ class CloudController(object): def format_addresses(self, context): addresses = [] - if context..is_admin: + if context.is_admin: iterator = db.floating_ip_get_all(context) else: iterator = db.floating_ip_get_all_by_project(context, @@ -750,7 +750,7 @@ class CloudController(object): ec2_id = id_to_ec2_id(instance_id) address_rv = {'public_ip': address, 'instance_id': ec2_id} - if context..is_admin: + if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details -- cgit From a60d4cb45f4298ce39cbc34ad3c0133ba344fa66 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 18 Feb 2011 23:15:42 -0800 Subject: more optimizations context.user.id to context.user_id --- nova/api/ec2/__init__.py | 2 +- nova/api/ec2/cloud.py | 8 ++++---- nova/volume/api.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 1a06b3f01..01ef6bf6d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -294,7 +294,7 @@ class Authorizer(wsgi.Middleware): return True if 'none' in roles: return False - return any(context.project.has_role(context.user.id, role) + return any(context.project.has_role(context.user_id, role) for role in roles) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 05cdcc57e..882cdcfc9 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -282,7 +282,7 @@ class CloudController(object): 'description': 'fixme'}]} def describe_key_pairs(self, context, key_name=None, **kwargs): - key_pairs = db.key_pair_get_all_by_user(context, context.user.id) + key_pairs = db.key_pair_get_all_by_user(context, context.user_id) if not key_name is None: key_pairs = [x for x in key_pairs if x['name'] in key_name] @@ -301,7 +301,7 
@@ class CloudController(object): def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) - data = _gen_key(context, context.user.id, key_name) + data = _gen_key(context, context.user_id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} @@ -310,7 +310,7 @@ class CloudController(object): def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: - db.key_pair_destroy(context, context.user.id, key_name) + db.key_pair_destroy(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass @@ -494,7 +494,7 @@ class CloudController(object): if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError(_('group %s already exists') % group_name) - group = {'user_id': context.user.id, + group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} diff --git a/nova/volume/api.py b/nova/volume/api.py index 5201c7d90..2f4494845 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -49,7 +49,7 @@ class API(base.Base): options = { 'size': size, - 'user_id': context.user.id, + 'user_id': context.user_id, 'project_id': context.project_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", -- cgit From 990a0fdce67971e81665aa2151e43b071d8bcb7c Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 18 Feb 2011 23:33:06 -0800 Subject: Fix FakeAuthManager so that unit tests pass; I believe it was matching the wrong field --- nova/tests/api/openstack/fakes.py | 8 ++++++-- nova/tests/api/openstack/test_auth.py | 6 +++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index fb282f1c9..e0b7b8029 100644 --- 
a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -221,7 +221,8 @@ class FakeAuthDatabase(object): class FakeAuthManager(object): auth_data = {} - def add_user(self, key, user): + def add_user(self, user): + key = user.id FakeAuthManager.auth_data[key] = user def get_user(self, uid): @@ -234,7 +235,10 @@ class FakeAuthManager(object): return None def get_user_from_access_key(self, key): - return FakeAuthManager.auth_data.get(key, None) + for k, v in FakeAuthManager.auth_data.iteritems(): + if v.access == key: + return v + return None class FakeRateLimiter(object): diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 0dd65d321..eab78b50c 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -48,7 +48,7 @@ class Test(unittest.TestCase): def test_authorize_user(self): f = fakes.FakeAuthManager() - f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) + f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' @@ -62,7 +62,7 @@ class Test(unittest.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) + f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' @@ -144,7 +144,7 @@ class TestLimiter(unittest.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) + f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' -- cgit From a3c6106f99085da69ab3c51b80135d3cedd81c4d Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 19 Feb 2011 01:22:27 -0800 Subject: store time when RequestLogging 
starts instead of using context's time --- nova/api/ec2/__init__.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 1a06b3f01..4e7e3267d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -56,6 +56,7 @@ class RequestLogging(wsgi.Middleware): @webob.dec.wsgify def __call__(self, req): + self.start = datetime.datetime.utcnow() rv = req.get_response(self.application) self.log_request_completion(rv, req) return rv @@ -66,13 +67,9 @@ class RequestLogging(wsgi.Middleware): controller = controller.__class__.__name__ action = request.environ.get('ec2.action', None) ctxt = request.environ.get('ec2.context', None) - seconds = 'X' - microseconds = 'X' - if ctxt: - delta = datetime.datetime.utcnow() - \ - ctxt.timestamp - seconds = delta.seconds - microseconds = delta.microseconds + delta = datetime.datetime.utcnow() - self.start + seconds = delta.seconds + microseconds = delta.microseconds LOG.info( "%s.%ss %s %s %s %s:%s %s [%s] %s %s", seconds, -- cgit From 86a858d076c62ddd7c27e04300aeb5d21111b986 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 19 Feb 2011 01:27:48 -0800 Subject: pass start time as a param instead of making it an attribute --- nova/api/ec2/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 4e7e3267d..cda34bc3a 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -56,18 +56,18 @@ class RequestLogging(wsgi.Middleware): @webob.dec.wsgify def __call__(self, req): - self.start = datetime.datetime.utcnow() + start = datetime.datetime.utcnow() rv = req.get_response(self.application) - self.log_request_completion(rv, req) + self.log_request_completion(rv, req, start) return rv - def log_request_completion(self, response, request): + def log_request_completion(self, response, request, start): controller = 
request.environ.get('ec2.controller', None) if controller: controller = controller.__class__.__name__ action = request.environ.get('ec2.action', None) ctxt = request.environ.get('ec2.context', None) - delta = datetime.datetime.utcnow() - self.start + delta = datetime.datetime.utcnow() - start seconds = delta.seconds microseconds = delta.microseconds LOG.info( -- cgit From d4a37dc28daf990d903ffd14607862cb2eafb1c8 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 19 Feb 2011 01:36:13 -0800 Subject: move from datetime.datetime.utcnow -> utils.utcnow --- nova/api/ec2/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index cda34bc3a..f892123fd 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,7 +20,6 @@ Starting point for routing EC2 requests. """ -import datetime import webob import webob.dec import webob.exc @@ -56,7 +55,7 @@ class RequestLogging(wsgi.Middleware): @webob.dec.wsgify def __call__(self, req): - start = datetime.datetime.utcnow() + start = utils.utcnow() rv = req.get_response(self.application) self.log_request_completion(rv, req, start) return rv @@ -67,7 +66,7 @@ class RequestLogging(wsgi.Middleware): controller = controller.__class__.__name__ action = request.environ.get('ec2.action', None) ctxt = request.environ.get('ec2.context', None) - delta = datetime.datetime.utcnow() - start + delta = utils.utcnow() - start seconds = delta.seconds microseconds = delta.microseconds LOG.info( -- cgit From 915d6e70106b30ed6919fa850749b8041c3e690d Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 19 Feb 2011 01:51:13 -0800 Subject: pep8 leftover --- nova/tests/api/openstack/test_zones.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 5542a1cf3..df497ef1b 100644 --- a/nova/tests/api/openstack/test_zones.py +++ 
b/nova/tests/api/openstack/test_zones.py @@ -57,8 +57,7 @@ def zone_get_all(context): dict(id=1, api_url='http://foo.com', username='bob', password='xxx'), dict(id=2, api_url='http://blah.com', username='alice', - password='qwerty') - ] + password='qwerty')] class ZonesTest(unittest.TestCase): -- cgit From 89a63f53116b04a8d0681265ba8ce71eeeb5be0b Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Sat, 19 Feb 2011 01:59:07 -0800 Subject: fix ec2 launchtime response not in iso format bug --- nova/api/ec2/apirequest.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 7e72d67fb..00b527d62 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -20,6 +20,7 @@ APIRequest class """ +import datetime import re # TODO(termie): replace minidom with etree from xml.dom import minidom @@ -171,6 +172,8 @@ class APIRequest(object): self._render_dict(xml, data_el, data.__dict__) elif isinstance(data, bool): data_el.appendChild(xml.createTextNode(str(data).lower())) + elif isinstance(data, datetime.datetime): + data_el.appendChild(xml.createTextNode(data.isoformat())) elif data != None: data_el.appendChild(xml.createTextNode(str(data))) -- cgit From a7eed42c57fe7eaf6f2981a88a74a81a6890198c Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Sun, 20 Feb 2011 20:56:14 +0100 Subject: puppet scripts only there as an example, should be moved to some other place if they are still necessary --- contrib/puppet/files/etc/default/nova-compute | 1 - contrib/puppet/files/etc/default/nova-volume | 1 - contrib/puppet/files/etc/issue | 5 - contrib/puppet/files/etc/libvirt/qemu.conf | 170 -------- contrib/puppet/files/etc/lvm/lvm.conf | 463 -------------------- contrib/puppet/files/etc/nova.conf | 28 -- contrib/puppet/files/production/boto.cfg | 3 - contrib/puppet/files/production/genvpn.sh | 35 -- .../files/production/libvirt.qemu.xml.template | 35 -- contrib/puppet/files/production/my.cnf | 137 ------ 
contrib/puppet/files/production/nova-iptables | 187 --------- contrib/puppet/files/production/nova-iscsi-dev.sh | 19 - contrib/puppet/files/production/setup_data.sh | 6 - contrib/puppet/files/production/slap.sh | 261 ------------ contrib/puppet/fileserver.conf | 8 - contrib/puppet/manifests/classes/apt.pp | 1 - contrib/puppet/manifests/classes/issue.pp | 14 - contrib/puppet/manifests/classes/kern_module.pp | 34 -- contrib/puppet/manifests/classes/loopback.pp | 6 - contrib/puppet/manifests/classes/lvm.pp | 8 - contrib/puppet/manifests/classes/lvmconf.pp | 8 - contrib/puppet/manifests/classes/nova.pp | 464 --------------------- contrib/puppet/manifests/classes/swift.pp | 7 - contrib/puppet/manifests/site.pp | 120 ------ contrib/puppet/manifests/templates.pp | 21 - contrib/puppet/puppet.conf | 11 - contrib/puppet/templates/haproxy.cfg.erb | 39 -- contrib/puppet/templates/monitrc-nova-api.erb | 138 ------ contrib/puppet/templates/nova-iptables.erb | 10 - .../templates/production/nova-common.conf.erb | 55 --- .../puppet/templates/production/nova-nova.conf.erb | 21 - 31 files changed, 2316 deletions(-) delete mode 100644 contrib/puppet/files/etc/default/nova-compute delete mode 100644 contrib/puppet/files/etc/default/nova-volume delete mode 100644 contrib/puppet/files/etc/issue delete mode 100644 contrib/puppet/files/etc/libvirt/qemu.conf delete mode 100644 contrib/puppet/files/etc/lvm/lvm.conf delete mode 100644 contrib/puppet/files/etc/nova.conf delete mode 100644 contrib/puppet/files/production/boto.cfg delete mode 100644 contrib/puppet/files/production/genvpn.sh delete mode 100644 contrib/puppet/files/production/libvirt.qemu.xml.template delete mode 100644 contrib/puppet/files/production/my.cnf delete mode 100755 contrib/puppet/files/production/nova-iptables delete mode 100644 contrib/puppet/files/production/nova-iscsi-dev.sh delete mode 100755 contrib/puppet/files/production/setup_data.sh delete mode 100755 contrib/puppet/files/production/slap.sh delete mode 100644 
contrib/puppet/fileserver.conf delete mode 100644 contrib/puppet/manifests/classes/apt.pp delete mode 100644 contrib/puppet/manifests/classes/issue.pp delete mode 100644 contrib/puppet/manifests/classes/kern_module.pp delete mode 100644 contrib/puppet/manifests/classes/loopback.pp delete mode 100644 contrib/puppet/manifests/classes/lvm.pp delete mode 100644 contrib/puppet/manifests/classes/lvmconf.pp delete mode 100644 contrib/puppet/manifests/classes/nova.pp delete mode 100644 contrib/puppet/manifests/classes/swift.pp delete mode 100644 contrib/puppet/manifests/site.pp delete mode 100644 contrib/puppet/manifests/templates.pp delete mode 100644 contrib/puppet/puppet.conf delete mode 100644 contrib/puppet/templates/haproxy.cfg.erb delete mode 100644 contrib/puppet/templates/monitrc-nova-api.erb delete mode 100644 contrib/puppet/templates/nova-iptables.erb delete mode 100644 contrib/puppet/templates/production/nova-common.conf.erb delete mode 100644 contrib/puppet/templates/production/nova-nova.conf.erb diff --git a/contrib/puppet/files/etc/default/nova-compute b/contrib/puppet/files/etc/default/nova-compute deleted file mode 100644 index 8bd7d091c..000000000 --- a/contrib/puppet/files/etc/default/nova-compute +++ /dev/null @@ -1 +0,0 @@ -ENABLED=true diff --git a/contrib/puppet/files/etc/default/nova-volume b/contrib/puppet/files/etc/default/nova-volume deleted file mode 100644 index 8bd7d091c..000000000 --- a/contrib/puppet/files/etc/default/nova-volume +++ /dev/null @@ -1 +0,0 @@ -ENABLED=true diff --git a/contrib/puppet/files/etc/issue b/contrib/puppet/files/etc/issue deleted file mode 100644 index 8c567221b..000000000 --- a/contrib/puppet/files/etc/issue +++ /dev/null @@ -1,5 +0,0 @@ ------------------------------------------------ - - Welcome to your OpenStack installation! 
- ------------------------------------------------ diff --git a/contrib/puppet/files/etc/libvirt/qemu.conf b/contrib/puppet/files/etc/libvirt/qemu.conf deleted file mode 100644 index 7839f12e5..000000000 --- a/contrib/puppet/files/etc/libvirt/qemu.conf +++ /dev/null @@ -1,170 +0,0 @@ -# Master configuration file for the QEMU driver. -# All settings described here are optional - if omitted, sensible -# defaults are used. - -# VNC is configured to listen on 127.0.0.1 by default. -# To make it listen on all public interfaces, uncomment -# this next option. -# -# NB, strong recommendation to enable TLS + x509 certificate -# verification when allowing public access -# -# vnc_listen = "0.0.0.0" - - -# Enable use of TLS encryption on the VNC server. This requires -# a VNC client which supports the VeNCrypt protocol extension. -# Examples include vinagre, virt-viewer, virt-manager and vencrypt -# itself. UltraVNC, RealVNC, TightVNC do not support this -# -# It is necessary to setup CA and issue a server certificate -# before enabling this. -# -# vnc_tls = 1 - - -# Use of TLS requires that x509 certificates be issued. The -# default it to keep them in /etc/pki/libvirt-vnc. This directory -# must contain -# -# ca-cert.pem - the CA master certificate -# server-cert.pem - the server certificate signed with ca-cert.pem -# server-key.pem - the server private key -# -# This option allows the certificate directory to be changed -# -# vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc" - - -# The default TLS configuration only uses certificates for the server -# allowing the client to verify the server's identity and establish -# and encrypted channel. -# -# It is possible to use x509 certificates for authentication too, by -# issuing a x509 certificate to every client who needs to connect. 
-# -# Enabling this option will reject any client who does not have a -# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem -# -# vnc_tls_x509_verify = 1 - - -# The default VNC password. Only 8 letters are significant for -# VNC passwords. This parameter is only used if the per-domain -# XML config does not already provide a password. To allow -# access without passwords, leave this commented out. An empty -# string will still enable passwords, but be rejected by QEMU -# effectively preventing any use of VNC. Obviously change this -# example here before you set this -# -# vnc_password = "XYZ12345" - - -# Enable use of SASL encryption on the VNC server. This requires -# a VNC client which supports the SASL protocol extension. -# Examples include vinagre, virt-viewer and virt-manager -# itself. UltraVNC, RealVNC, TightVNC do not support this -# -# It is necessary to configure /etc/sasl2/qemu.conf to choose -# the desired SASL plugin (eg, GSSPI for Kerberos) -# -# vnc_sasl = 1 - - -# The default SASL configuration file is located in /etc/sasl2/ -# When running libvirtd unprivileged, it may be desirable to -# override the configs in this location. Set this parameter to -# point to the directory, and create a qemu.conf in that location -# -# vnc_sasl_dir = "/some/directory/sasl2" - - - - -# The default security driver is SELinux. If SELinux is disabled -# on the host, then the security driver will automatically disable -# itself. If you wish to disable QEMU SELinux security driver while -# leaving SELinux enabled for the host in general, then set this -# to 'none' instead -# -# security_driver = "selinux" - - -# The user ID for QEMU processes run by the system instance -user = "root" - -# The group ID for QEMU processes run by the system instance -group = "root" - -# Whether libvirt should dynamically change file ownership -# to match the configured user/group above. Defaults to 1. -# Set to 0 to disable file ownership changes. 
-#dynamic_ownership = 1 - - -# What cgroup controllers to make use of with QEMU guests -# -# - 'cpu' - use for schedular tunables -# - 'devices' - use for device whitelisting -# -# NB, even if configured here, they won't be used unless -# the adminsitrator has mounted cgroups. eg -# -# mkdir /dev/cgroup -# mount -t cgroup -o devices,cpu none /dev/cgroup -# -# They can be mounted anywhere, and different controlers -# can be mounted in different locations. libvirt will detect -# where they are located. -# -# cgroup_controllers = [ "cpu", "devices" ] - -# This is the basic set of devices allowed / required by -# all virtual machines. -# -# As well as this, any configured block backed disks, -# all sound device, and all PTY devices are allowed. -# -# This will only need setting if newer QEMU suddenly -# wants some device we don't already know a bout. -# -#cgroup_device_acl = [ -# "/dev/null", "/dev/full", "/dev/zero", -# "/dev/random", "/dev/urandom", -# "/dev/ptmx", "/dev/kvm", "/dev/kqemu", -# "/dev/rtc", "/dev/hpet", "/dev/net/tun", -#] - -# The default format for Qemu/KVM guest save images is raw; that is, the -# memory from the domain is dumped out directly to a file. If you have -# guests with a large amount of memory, however, this can take up quite -# a bit of space. If you would like to compress the images while they -# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz" -# for save_image_format. Note that this means you slow down the process of -# saving a domain in order to save disk space; the list above is in descending -# order by performance and ascending order by compression ratio. -# -# save_image_format = "raw" - -# If provided by the host and a hugetlbfs mount point is configured, -# a guest may request huge page backing. When this mount point is -# unspecified here, determination of a host mount point in /proc/mounts -# will be attempted. Specifying an explicit mount overrides detection -# of the same in /proc/mounts. 
Setting the mount point to "" will -# disable guest hugepage backing. -# -# NB, within this mount point, guests will create memory backing files -# in a location of $MOUNTPOINT/libvirt/qemu - -# hugetlbfs_mount = "/dev/hugepages" - -# mac_filter enables MAC addressed based filtering on bridge ports. -# This currently requires ebtables to be installed. -# -# mac_filter = 1 - -# By default, PCI devices below non-ACS switch are not allowed to be assigned -# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to -# be assigned to guests. -# -# relaxed_acs_check = 1 diff --git a/contrib/puppet/files/etc/lvm/lvm.conf b/contrib/puppet/files/etc/lvm/lvm.conf deleted file mode 100644 index 4e814ad49..000000000 --- a/contrib/puppet/files/etc/lvm/lvm.conf +++ /dev/null @@ -1,463 +0,0 @@ -# This is an example configuration file for the LVM2 system. -# It contains the default settings that would be used if there was no -# /etc/lvm/lvm.conf file. -# -# Refer to 'man lvm.conf' for further information including the file layout. -# -# To put this file in a different directory and override /etc/lvm set -# the environment variable LVM_SYSTEM_DIR before running the tools. - - -# This section allows you to configure which block devices should -# be used by the LVM system. -devices { - - # Where do you want your volume groups to appear ? - dir = "/dev" - - # An array of directories that contain the device nodes you wish - # to use with LVM2. - scan = [ "/dev" ] - - # If several entries in the scanned directories correspond to the - # same block device and the tools need to display a name for device, - # all the pathnames are matched against each item in the following - # list of regular expressions in turn and the first match is used. - preferred_names = [ ] - - # Try to avoid using undescriptive /dev/dm-N names, if present. 
- # preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ] - - # A filter that tells LVM2 to only use a restricted set of devices. - # The filter consists of an array of regular expressions. These - # expressions can be delimited by a character of your choice, and - # prefixed with either an 'a' (for accept) or 'r' (for reject). - # The first expression found to match a device name determines if - # the device will be accepted or rejected (ignored). Devices that - # don't match any patterns are accepted. - - # Be careful if there there are symbolic links or multiple filesystem - # entries for the same device as each name is checked separately against - # the list of patterns. The effect is that if any name matches any 'a' - # pattern, the device is accepted; otherwise if any name matches any 'r' - # pattern it is rejected; otherwise it is accepted. - - # Don't have more than one filter line active at once: only one gets used. - - # Run vgscan after you change this parameter to ensure that - # the cache file gets regenerated (see below). - # If it doesn't do what you expect, check the output of 'vgscan -vvvv'. - - - # By default we accept every block device: - filter = [ "r|/dev/etherd/.*|", "r|/dev/block/.*|", "a/.*/" ] - - # Exclude the cdrom drive - # filter = [ "r|/dev/cdrom|" ] - - # When testing I like to work with just loopback devices: - # filter = [ "a/loop/", "r/.*/" ] - - # Or maybe all loops and ide drives except hdc: - # filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ] - - # Use anchors if you want to be really specific - # filter = [ "a|^/dev/hda8$|", "r/.*/" ] - - # The results of the filtering are cached on disk to avoid - # rescanning dud devices (which can take a very long time). - # By default this cache is stored in the /etc/lvm/cache directory - # in a file called '.cache'. - # It is safe to delete the contents: the tools regenerate it. 
- # (The old setting 'cache' is still respected if neither of - # these new ones is present.) - cache_dir = "/etc/lvm/cache" - cache_file_prefix = "" - - # You can turn off writing this cache file by setting this to 0. - write_cache_state = 1 - - # Advanced settings. - - # List of pairs of additional acceptable block device types found - # in /proc/devices with maximum (non-zero) number of partitions. - # types = [ "fd", 16 ] - - # If sysfs is mounted (2.6 kernels) restrict device scanning to - # the block devices it believes are valid. - # 1 enables; 0 disables. - sysfs_scan = 1 - - # By default, LVM2 will ignore devices used as components of - # software RAID (md) devices by looking for md superblocks. - # 1 enables; 0 disables. - md_component_detection = 1 - - # By default, if a PV is placed directly upon an md device, LVM2 - # will align its data blocks with the md device's stripe-width. - # 1 enables; 0 disables. - md_chunk_alignment = 1 - - # By default, the start of a PV's data area will be a multiple of - # the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs. - # - minimum_io_size - the smallest request the device can perform - # w/o incurring a read-modify-write penalty (e.g. MD's chunk size) - # - optimal_io_size - the device's preferred unit of receiving I/O - # (e.g. MD's stripe width) - # minimum_io_size is used if optimal_io_size is undefined (0). - # If md_chunk_alignment is enabled, that detects the optimal_io_size. - # This setting takes precedence over md_chunk_alignment. - # 1 enables; 0 disables. - data_alignment_detection = 1 - - # Alignment (in KB) of start of data area when creating a new PV. - # If a PV is placed directly upon an md device and md_chunk_alignment or - # data_alignment_detection is enabled this parameter is ignored. - # Set to 0 for the default alignment of 64KB or page size, if larger. 
- data_alignment = 0 - - # By default, the start of the PV's aligned data area will be shifted by - # the 'alignment_offset' exposed in sysfs. This offset is often 0 but - # may be non-zero; e.g.: certain 4KB sector drives that compensate for - # windows partitioning will have an alignment_offset of 3584 bytes - # (sector 7 is the lowest aligned logical block, the 4KB sectors start - # at LBA -1, and consequently sector 63 is aligned on a 4KB boundary). - # 1 enables; 0 disables. - data_alignment_offset_detection = 1 - - # If, while scanning the system for PVs, LVM2 encounters a device-mapper - # device that has its I/O suspended, it waits for it to become accessible. - # Set this to 1 to skip such devices. This should only be needed - # in recovery situations. - ignore_suspended_devices = 0 -} - -# This section that allows you to configure the nature of the -# information that LVM2 reports. -log { - - # Controls the messages sent to stdout or stderr. - # There are three levels of verbosity, 3 being the most verbose. - verbose = 0 - - # Should we send log messages through syslog? - # 1 is yes; 0 is no. - syslog = 1 - - # Should we log error and debug messages to a file? - # By default there is no log file. - #file = "/var/log/lvm2.log" - - # Should we overwrite the log file each time the program is run? - # By default we append. - overwrite = 0 - - # What level of log messages should we send to the log file and/or syslog? - # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive. - # 7 is the most verbose (LOG_DEBUG). - level = 0 - - # Format of output messages - # Whether or not (1 or 0) to indent messages according to their severity - indent = 1 - - # Whether or not (1 or 0) to display the command name on each line output - command_names = 0 - - # A prefix to use before the message text (but after the command name, - # if selected). Default is two spaces, so you can see/grep the severity - # of each message. 
- prefix = " " - - # To make the messages look similar to the original LVM tools use: - # indent = 0 - # command_names = 1 - # prefix = " -- " - - # Set this if you want log messages during activation. - # Don't use this in low memory situations (can deadlock). - # activation = 0 -} - -# Configuration of metadata backups and archiving. In LVM2 when we -# talk about a 'backup' we mean making a copy of the metadata for the -# *current* system. The 'archive' contains old metadata configurations. -# Backups are stored in a human readeable text format. -backup { - - # Should we maintain a backup of the current metadata configuration ? - # Use 1 for Yes; 0 for No. - # Think very hard before turning this off! - backup = 1 - - # Where shall we keep it ? - # Remember to back up this directory regularly! - backup_dir = "/etc/lvm/backup" - - # Should we maintain an archive of old metadata configurations. - # Use 1 for Yes; 0 for No. - # On by default. Think very hard before turning this off. - archive = 1 - - # Where should archived files go ? - # Remember to back up this directory regularly! - archive_dir = "/etc/lvm/archive" - - # What is the minimum number of archive files you wish to keep ? - retain_min = 10 - - # What is the minimum time you wish to keep an archive file for ? - retain_days = 30 -} - -# Settings for the running LVM2 in shell (readline) mode. -shell { - - # Number of lines of history to store in ~/.lvm_history - history_size = 100 -} - - -# Miscellaneous global LVM2 settings -global { - - # The file creation mask for any files and directories created. - # Interpreted as octal if the first digit is zero. - umask = 077 - - # Allow other users to read the files - #umask = 022 - - # Enabling test mode means that no changes to the on disk metadata - # will be made. Equivalent to having the -t option on every - # command. Defaults to off. 
- test = 0 - - # Default value for --units argument - units = "h" - - # Since version 2.02.54, the tools distinguish between powers of - # 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g. - # KB, MB, GB). - # If you have scripts that depend on the old behaviour, set this to 0 - # temporarily until you update them. - si_unit_consistency = 1 - - # Whether or not to communicate with the kernel device-mapper. - # Set to 0 if you want to use the tools to manipulate LVM metadata - # without activating any logical volumes. - # If the device-mapper kernel driver is not present in your kernel - # setting this to 0 should suppress the error messages. - activation = 1 - - # If we can't communicate with device-mapper, should we try running - # the LVM1 tools? - # This option only applies to 2.4 kernels and is provided to help you - # switch between device-mapper kernels and LVM1 kernels. - # The LVM1 tools need to be installed with .lvm1 suffices - # e.g. vgscan.lvm1 and they will stop working after you start using - # the new lvm2 on-disk metadata format. - # The default value is set when the tools are built. - # fallback_to_lvm1 = 0 - - # The default metadata format that commands should use - "lvm1" or "lvm2". - # The command line override is -M1 or -M2. - # Defaults to "lvm2". - # format = "lvm2" - - # Location of proc filesystem - proc = "/proc" - - # Type of locking to use. Defaults to local file-based locking (1). - # Turn locking off by setting to 0 (dangerous: risks metadata corruption - # if LVM2 commands get run concurrently). - # Type 2 uses the external shared library locking_library. - # Type 3 uses built-in clustered locking. - # Type 4 uses read-only locking which forbids any operations that might - # change metadata. - locking_type = 1 - - # Set to 0 to fail when a lock request cannot be satisfied immediately. 
- wait_for_locks = 1 - - # If using external locking (type 2) and initialisation fails, - # with this set to 1 an attempt will be made to use the built-in - # clustered locking. - # If you are using a customised locking_library you should set this to 0. - fallback_to_clustered_locking = 1 - - # If an attempt to initialise type 2 or type 3 locking failed, perhaps - # because cluster components such as clvmd are not running, with this set - # to 1 an attempt will be made to use local file-based locking (type 1). - # If this succeeds, only commands against local volume groups will proceed. - # Volume Groups marked as clustered will be ignored. - fallback_to_local_locking = 1 - - # Local non-LV directory that holds file-based locks while commands are - # in progress. A directory like /tmp that may get wiped on reboot is OK. - locking_dir = "/var/lock/lvm" - - # Whenever there are competing read-only and read-write access requests for - # a volume group's metadata, instead of always granting the read-only - # requests immediately, delay them to allow the read-write requests to be - # serviced. Without this setting, write access may be stalled by a high - # volume of read-only requests. - # NB. This option only affects locking_type = 1 viz. local file-based - # locking. - prioritise_write_locks = 1 - - # Other entries can go here to allow you to load shared libraries - # e.g. if support for LVM1 metadata was compiled as a shared library use - # format_libraries = "liblvm2format1.so" - # Full pathnames can be given. - - # Search this directory first for shared libraries. - # library_dir = "/lib/lvm2" - - # The external locking library to load if locking_type is set to 2. - # locking_library = "liblvm2clusterlock.so" -} - -activation { - # Set to 0 to disable udev syncronisation (if compiled into the binaries). - # Processes will not wait for notification from udev. - # They will continue irrespective of any possible udev processing - # in the background. 
You should only use this if udev is not running - # or has rules that ignore the devices LVM2 creates. - # The command line argument --nodevsync takes precedence over this setting. - # If set to 1 when udev is not running, and there are LVM2 processes - # waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up. - udev_sync = 1 - - # How to fill in missing stripes if activating an incomplete volume. - # Using "error" will make inaccessible parts of the device return - # I/O errors on access. You can instead use a device path, in which - # case, that device will be used to in place of missing stripes. - # But note that using anything other than "error" with mirrored - # or snapshotted volumes is likely to result in data corruption. - missing_stripe_filler = "error" - - # How much stack (in KB) to reserve for use while devices suspended - reserved_stack = 256 - - # How much memory (in KB) to reserve for use while devices suspended - reserved_memory = 8192 - - # Nice value used while devices suspended - process_priority = -18 - - # If volume_list is defined, each LV is only activated if there is a - # match against the list. - # "vgname" and "vgname/lvname" are matched exactly. - # "@tag" matches any tag set in the LV or VG. - # "@*" matches if any tag defined on the host is also set in the LV or VG - # - # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ] - - # Size (in KB) of each copy operation when mirroring - mirror_region_size = 512 - - # Setting to use when there is no readahead value stored in the metadata. - # - # "none" - Disable readahead. - # "auto" - Use default value chosen by kernel. - readahead = "auto" - - # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define - # how a device failure affecting a mirror is handled. - # A mirror is composed of mirror images (copies) and a log. - # A disk log ensures that a mirror does not need to be re-synced - # (all copies made the same) every time a machine reboots or crashes. 
- # - # In the event of a failure, the specified policy will be used to determine - # what happens. This applies to automatic repairs (when the mirror is being - # monitored by dmeventd) and to manual lvconvert --repair when - # --use-policies is given. - # - # "remove" - Simply remove the faulty device and run without it. If - # the log device fails, the mirror would convert to using - # an in-memory log. This means the mirror will not - # remember its sync status across crashes/reboots and - # the entire mirror will be re-synced. If a - # mirror image fails, the mirror will convert to a - # non-mirrored device if there is only one remaining good - # copy. - # - # "allocate" - Remove the faulty device and try to allocate space on - # a new device to be a replacement for the failed device. - # Using this policy for the log is fast and maintains the - # ability to remember sync state through crashes/reboots. - # Using this policy for a mirror device is slow, as it - # requires the mirror to resynchronize the devices, but it - # will preserve the mirror characteristic of the device. - # This policy acts like "remove" if no suitable device and - # space can be allocated for the replacement. - # - # "allocate_anywhere" - Not yet implemented. Useful to place the log device - # temporarily on same physical volume as one of the mirror - # images. This policy is not recommended for mirror devices - # since it would break the redundant nature of the mirror. This - # policy acts like "remove" if no suitable device and space can - # be allocated for the replacement. - - mirror_log_fault_policy = "allocate" - mirror_device_fault_policy = "remove" -} - - -#################### -# Advanced section # -#################### - -# Metadata settings -# -# metadata { - # Default number of copies of metadata to hold on each PV. 0, 1 or 2. - # You might want to override it from the command line with 0 - # when running pvcreate on new PVs which are to be added to large VGs. 
- - # pvmetadatacopies = 1 - - # Approximate default size of on-disk metadata areas in sectors. - # You should increase this if you have large volume groups or - # you want to retain a large on-disk history of your metadata changes. - - # pvmetadatasize = 255 - - # List of directories holding live copies of text format metadata. - # These directories must not be on logical volumes! - # It's possible to use LVM2 with a couple of directories here, - # preferably on different (non-LV) filesystems, and with no other - # on-disk metadata (pvmetadatacopies = 0). Or this can be in - # addition to on-disk metadata areas. - # The feature was originally added to simplify testing and is not - # supported under low memory situations - the machine could lock up. - # - # Never edit any files in these directories by hand unless you - # you are absolutely sure you know what you are doing! Use - # the supplied toolset to make changes (e.g. vgcfgrestore). - - # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ] -#} - -# Event daemon -# -dmeventd { - # mirror_library is the library used when monitoring a mirror device. - # - # "libdevmapper-event-lvm2mirror.so" attempts to recover from - # failures. It removes failed devices from a volume group and - # reconfigures a mirror as necessary. If no mirror library is - # provided, mirrors are not monitored through dmeventd. - - mirror_library = "libdevmapper-event-lvm2mirror.so" - - # snapshot_library is the library used when monitoring a snapshot device. - # - # "libdevmapper-event-lvm2snapshot.so" monitors the filling of - # snapshots and emits a warning through syslog, when the use of - # snapshot exceedes 80%. The warning is repeated when 85%, 90% and - # 95% of the snapshot are filled. 
- - snapshot_library = "libdevmapper-event-lvm2snapshot.so" -} diff --git a/contrib/puppet/files/etc/nova.conf b/contrib/puppet/files/etc/nova.conf deleted file mode 100644 index a0d64078c..000000000 --- a/contrib/puppet/files/etc/nova.conf +++ /dev/null @@ -1,28 +0,0 @@ ---ec2_url=http://192.168.255.1:8773/services/Cloud ---rabbit_host=192.168.255.1 ---redis_host=192.168.255.1 ---s3_host=192.168.255.1 ---vpn_ip=192.168.255.1 ---datastore_path=/var/lib/nova/keeper ---networks_path=/var/lib/nova/networks ---instances_path=/var/lib/nova/instances ---buckets_path=/var/lib/nova/objectstore/buckets ---images_path=/var/lib/nova/objectstore/images ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---vlan_start=2000 ---vlan_end=3000 ---private_range=192.168.0.0/16 ---public_range=10.0.0.0/24 ---volume_group=vgdata ---storage_dev=/dev/sdc ---bridge_dev=eth2 ---aoe_eth_dev=eth2 ---public_interface=vlan0 ---default_kernel=aki-DEFAULT ---default_ramdisk=ari-DEFAULT ---vpn_image_id=ami-cloudpipe ---daemonize ---verbose ---syslog ---prefix=nova diff --git a/contrib/puppet/files/production/boto.cfg b/contrib/puppet/files/production/boto.cfg deleted file mode 100644 index f4a2de2b6..000000000 --- a/contrib/puppet/files/production/boto.cfg +++ /dev/null @@ -1,3 +0,0 @@ -[Boto] -debug = 0 -num_retries = 1 diff --git a/contrib/puppet/files/production/genvpn.sh b/contrib/puppet/files/production/genvpn.sh deleted file mode 100644 index 538c3cd33..000000000 --- a/contrib/puppet/files/production/genvpn.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This gets zipped and run on the cloudpipe-managed OpenVPN server -NAME=$1 -SUBJ=$2 - -mkdir -p projects/$NAME -cd projects/$NAME - -# generate a server priv key -openssl genrsa -out server.key 2048 - -# generate a server CSR -openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ" - -if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then - sudo chown -R nova:nogroup . -fi diff --git a/contrib/puppet/files/production/libvirt.qemu.xml.template b/contrib/puppet/files/production/libvirt.qemu.xml.template deleted file mode 100644 index 114dfdc01..000000000 --- a/contrib/puppet/files/production/libvirt.qemu.xml.template +++ /dev/null @@ -1,35 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/kernel - %(basepath)s/ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - diff --git a/contrib/puppet/files/production/my.cnf b/contrib/puppet/files/production/my.cnf deleted file mode 100644 index 8777bc480..000000000 --- a/contrib/puppet/files/production/my.cnf +++ /dev/null @@ -1,137 +0,0 @@ -# -# The MySQL database server configuration file. -# -# You can copy this to one of: -# - "/etc/mysql/my.cnf" to set global options, -# - "~/.my.cnf" to set user-specific options. -# -# One can use all long options that the program supports. -# Run program with --help to get a list of available options and with -# --print-defaults to see which it would actually understand and use. 
-# -# For explanations see -# http://dev.mysql.com/doc/mysql/en/server-system-variables.html - -# This will be passed to all mysql clients -# It has been reported that passwords should be enclosed with ticks/quotes -# escpecially if they contain "#" chars... -# Remember to edit /etc/mysql/debian.cnf when changing the socket location. -[client] -port = 3306 -socket = /var/run/mysqld/mysqld.sock - -# Here is entries for some specific programs -# The following values assume you have at least 32M ram - -# This was formally known as [safe_mysqld]. Both versions are currently parsed. -[mysqld_safe] -socket = /var/run/mysqld/mysqld.sock -nice = 0 - -[mysqld] -# -# * Basic Settings -# - -# -# * IMPORTANT -# If you make changes to these settings and your system uses apparmor, you may -# also need to also adjust /etc/apparmor.d/usr.sbin.mysqld. -# - -user = mysql -socket = /var/run/mysqld/mysqld.sock -port = 3306 -basedir = /usr -datadir = /var/lib/mysql -tmpdir = /tmp -skip-external-locking -# -# Instead of skip-networking the default is now to listen only on -# localhost which is more compatible and is not less secure. -# bind-address = 127.0.0.1 -# -# * Fine Tuning -# -innodb_buffer_pool_size = 12G -#innodb_log_file_size = 256M -innodb_log_buffer_size=4M -innodb_flush_log_at_trx_commit=2 -innodb_thread_concurrency=8 -innodb_flush_method=O_DIRECT -key_buffer = 128M -max_allowed_packet = 256M -thread_stack = 8196K -thread_cache_size = 32 -# This replaces the startup script and checks MyISAM tables if needed -# the first time they are touched -myisam-recover = BACKUP -max_connections = 1000 -table_cache = 1024 -#thread_concurrency = 10 -# -# * Query Cache Configuration -# -query_cache_limit = 32M -query_cache_size = 256M -# -# * Logging and Replication -# -# Both location gets rotated by the cronjob. -# Be aware that this log type is a performance killer. -# As of 5.1 you can enable the log at runtime! 
-#general_log_file = /var/log/mysql/mysql.log -#general_log = 1 - -log_error = /var/log/mysql/error.log - -# Here you can see queries with especially long duration -log_slow_queries = /var/log/mysql/mysql-slow.log -long_query_time = 2 -#log-queries-not-using-indexes -# -# The following can be used as easy to replay backup logs or for replication. -# note: if you are setting up a replication slave, see README.Debian about -# other settings you may need to change. -server-id = 1 -log_bin = /var/log/mysql/mysql-bin.log -expire_logs_days = 10 -max_binlog_size = 50M -#binlog_do_db = include_database_name -#binlog_ignore_db = include_database_name -# -# * InnoDB -# -sync_binlog=1 -# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. -# Read the manual for more InnoDB related options. There are many! -# -# * Security Features -# -# Read the manual, too, if you want chroot! -# chroot = /var/lib/mysql/ -# -# For generating SSL certificates I recommend the OpenSSL GUI "tinyca". -# -# ssl-ca=/etc/mysql/cacert.pem -# ssl-cert=/etc/mysql/server-cert.pem -# ssl-key=/etc/mysql/server-key.pem - - - -[mysqldump] -quick -quote-names -max_allowed_packet = 256M - -[mysql] -#no-auto-rehash # faster start of mysql but no tab completition - -[isamchk] -key_buffer = 128M - -# -# * IMPORTANT: Additional settings that can override those from this file! -# The files must end with '.cnf', otherwise they'll be ignored. -# -!includedir /etc/mysql/conf.d/ diff --git a/contrib/puppet/files/production/nova-iptables b/contrib/puppet/files/production/nova-iptables deleted file mode 100755 index 61e2ca2b9..000000000 --- a/contrib/puppet/files/production/nova-iptables +++ /dev/null @@ -1,187 +0,0 @@ -#! /bin/sh - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(vish): This script sets up some reasonable defaults for iptables and -# creates nova-specific chains. If you use this script you should -# run nova-network and nova-compute with --use_nova_chains=True - - -# NOTE(vish): If you run public nova-api on a different port, make sure to -# change the port here - -if [ -f /etc/default/nova-iptables ] ; then - . /etc/default/nova-iptables -fi - -export LC_ALL=C - -API_PORT=${API_PORT:-"8773"} - -if [ ! -n "$IP" ]; then - # NOTE(vish): IP address is what address the services ALLOW on. - # This will just get the first ip in the list, so if you - # have more than one eth device set up, this will fail, and - # you should explicitly pass in the ip of the instance - IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` -fi - -if [ ! -n "$PRIVATE_RANGE" ]; then - #NOTE(vish): PRIVATE_RANGE: range is ALLOW to access DHCP - PRIVATE_RANGE="192.168.0.0/12" -fi - -if [ ! -n "$MGMT_IP" ]; then - # NOTE(vish): Management IP is the ip over which to allow ssh traffic. It - # will also allow traffic to nova-api - MGMT_IP="$IP" -fi - -if [ ! 
-n "$DMZ_IP" ]; then - # NOTE(vish): DMZ IP is the ip over which to allow api & objectstore access - DMZ_IP="$IP" -fi - -clear_nova_iptables() { - iptables -P INPUT ACCEPT - iptables -P FORWARD ACCEPT - iptables -P OUTPUT ACCEPT - iptables -F - iptables -t nat -F - iptables -F services - iptables -X services - # HACK: re-adding fail2ban rules :( - iptables -N fail2ban-ssh - iptables -A INPUT -p tcp -m multiport --dports 22 -j fail2ban-ssh - iptables -A fail2ban-ssh -j RETURN -} - -load_nova_iptables() { - - iptables -P INPUT DROP - iptables -A INPUT -m state --state INVALID -j DROP - iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT - # NOTE(ja): allow localhost for everything - iptables -A INPUT -d 127.0.0.1/32 -j ACCEPT - # NOTE(ja): 22 only allowed MGMT_IP before, but we widened it to any - # address, since ssh should be listening only on internal - # before we re-add this rule we will need to add - # flexibility for RSYNC between omega/stingray - iptables -A INPUT -m tcp -p tcp --dport 22 -j ACCEPT - iptables -A INPUT -m udp -p udp --dport 123 -j ACCEPT - iptables -A INPUT -p icmp -j ACCEPT - iptables -N services - iptables -A INPUT -j services - iptables -A INPUT -p tcp -j REJECT --reject-with tcp-reset - iptables -A INPUT -j REJECT --reject-with icmp-port-unreachable - - iptables -P FORWARD DROP - iptables -A FORWARD -m state --state INVALID -j DROP - iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT - iptables -A FORWARD -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu - - # NOTE(vish): DROP on output is too restrictive for now. We need to add - # in a bunch of more specific output rules to use it. 
- # iptables -P OUTPUT DROP - iptables -A OUTPUT -m state --state INVALID -j DROP - iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT - - if [ -n "$GANGLIA" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 8649 -j ACCEPT - iptables -A services -m udp -p udp -d $IP --dport 8649 -j ACCEPT - fi - - # if [ -n "$WEB" ] || [ -n "$ALL" ]; then - # # NOTE(vish): This opens up ports for web access, allowing web-based - # # dashboards to work. - # iptables -A services -m tcp -p tcp -d $IP --dport 80 -j ACCEPT - # iptables -A services -m tcp -p tcp -d $IP --dport 443 -j ACCEPT - # fi - - if [ -n "$OBJECTSTORE" ] || [ -n "$ALL" ]; then - # infrastructure - iptables -A services -m tcp -p tcp -d $IP --dport 3333 -j ACCEPT - # clients - iptables -A services -m tcp -p tcp -d $DMZ_IP --dport 3333 -j ACCEPT - fi - - if [ -n "$API" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport $API_PORT -j ACCEPT - if [ "$IP" != "$DMZ_IP" ]; then - iptables -A services -m tcp -p tcp -d $DMZ_IP --dport $API_PORT -j ACCEPT - fi - if [ "$IP" != "$MGMT_IP" ] && [ "$DMZ_IP" != "$MGMT_IP" ]; then - iptables -A services -m tcp -p tcp -d $MGMT_IP --dport $API_PORT -j ACCEPT - fi - fi - - if [ -n "$REDIS" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 6379 -j ACCEPT - fi - - if [ -n "$MYSQL" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 3306 -j ACCEPT - fi - - if [ -n "$RABBITMQ" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 4369 -j ACCEPT - iptables -A services -m tcp -p tcp -d $IP --dport 5672 -j ACCEPT - iptables -A services -m tcp -p tcp -d $IP --dport 53284 -j ACCEPT - fi - - if [ -n "$DNSMASQ" ] || [ -n "$ALL" ]; then - # NOTE(vish): this could theoretically be setup per network - # for each host, but it seems like overkill - iptables -A services -m tcp -p tcp -s $PRIVATE_RANGE --dport 53 -j ACCEPT - iptables -A services -m udp -p udp -s 
$PRIVATE_RANGE --dport 53 -j ACCEPT - iptables -A services -m udp -p udp --dport 67 -j ACCEPT - fi - - if [ -n "$LDAP" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 389 -j ACCEPT - fi - - if [ -n "$ISCSI" ] || [ -n "$ALL" ]; then - iptables -A services -m tcp -p tcp -d $IP --dport 3260 -j ACCEPT - iptables -A services -m tcp -p tcp -d 127.0.0.0/16 --dport 3260 -j ACCEPT - fi -} - - -case "$1" in - start) - echo "Starting nova-iptables: " - load_nova_iptables - ;; - stop) - echo "Clearing nova-iptables: " - clear_nova_iptables - ;; - restart) - echo "Restarting nova-iptables: " - clear_nova_iptables - load_nova_iptables - ;; - *) - echo "Usage: $NAME {start|stop|restart}" >&2 - exit 1 - ;; -esac - -exit 0 diff --git a/contrib/puppet/files/production/nova-iscsi-dev.sh b/contrib/puppet/files/production/nova-iscsi-dev.sh deleted file mode 100644 index 8eda10d2e..000000000 --- a/contrib/puppet/files/production/nova-iscsi-dev.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -# FILE: /etc/udev/scripts/iscsidev.sh - -BUS=${1} -HOST=${BUS%%:*} - -[ -e /sys/class/iscsi_host ] || exit 1 - -file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname" - -target_name=$(cat ${file}) - -# This is not an open-scsi drive -if [ -z "${target_name}" ]; then - exit 1 -fi - -echo "${target_name##*:}" diff --git a/contrib/puppet/files/production/setup_data.sh b/contrib/puppet/files/production/setup_data.sh deleted file mode 100755 index 1fbbac41c..000000000 --- a/contrib/puppet/files/production/setup_data.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -/root/slap.sh -mysql -e "DROP DATABASE nova" -mysql -e "CREATE DATABASE nova" -mysql -e "GRANT ALL on nova.* to nova@'%' identified by 'TODO:CHANGEME:CMON'" -touch /root/installed diff --git a/contrib/puppet/files/production/slap.sh b/contrib/puppet/files/production/slap.sh deleted file mode 100755 index f8ea16949..000000000 --- a/contrib/puppet/files/production/slap.sh +++ 
/dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env bash -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS - -apt-get install -y slapd ldap-utils python-ldap - -cat >/etc/ldap/schema/openssh-lpk_openldap.schema < -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) -LPK_SCHEMA_EOF - -cat >/etc/ldap/schema/nova.schema < -# -# - -# using internet experimental oid arc as per BP64 3.1 -objectidentifier novaSchema 1.3.6.1.3.1.666.666 -objectidentifier novaAttrs novaSchema:3 -objectidentifier novaOCs novaSchema:4 - -attributetype ( - novaAttrs:1 - NAME 'accessKey' - DESC 'Key for accessing data' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:2 - NAME 'secretKey' - DESC 'Secret key' - EQUALITY caseIgnoreMatch - SUBSTR 
caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' - EQUALITY booleanMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 - SINGLE-VALUE - ) - -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - -objectClass ( - novaOCs:1 - NAME 'novaUser' - DESC 'access and secret keys' - AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) - ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) - -NOVA_SCHEMA_EOF - -mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig -cat >/etc/ldap/slapd.conf </etc/ldap/ldap.conf </etc/ldap/base.ldif < "/usr/bin/apt-get update" } diff --git a/contrib/puppet/manifests/classes/issue.pp b/contrib/puppet/manifests/classes/issue.pp deleted file mode 100644 index 8bb37ee3f..000000000 --- a/contrib/puppet/manifests/classes/issue.pp +++ /dev/null @@ -1,14 +0,0 @@ -class issue { - file { "/etc/issue": - owner => "root", - group => "root", - mode => 444, - source => "puppet://${puppet_server}/files/etc/issue", - } - file { "/etc/issue.net": - owner => "root", - group => "root", - mode => 444, - source => "puppet://${puppet_server}/files/etc/issue", - } -} diff --git a/contrib/puppet/manifests/classes/kern_module.pp b/contrib/puppet/manifests/classes/kern_module.pp deleted file mode 100644 index 00ec0636c..000000000 --- a/contrib/puppet/manifests/classes/kern_module.pp +++ /dev/null @@ -1,34 +0,0 @@ -# 
via http://projects.puppetlabs.com/projects/puppet/wiki/Kernel_Modules_Patterns - -define kern_module ($ensure) { - $modulesfile = $operatingsystem ? { ubuntu => "/etc/modules", redhat => "/etc/rc.modules" } - case $operatingsystem { - redhat: { file { "/etc/rc.modules": ensure => file, mode => 755 } } - } - case $ensure { - present: { - exec { "insert_module_${name}": - command => $operatingsystem ? { - ubuntu => "/bin/echo '${name}' >> '${modulesfile}'", - redhat => "/bin/echo '/sbin/modprobe ${name}' >> '${modulesfile}' " - }, - unless => "/bin/grep -qFx '${name}' '${modulesfile}'" - } - exec { "/sbin/modprobe ${name}": unless => "/bin/grep -q '^${name} ' '/proc/modules'" } - } - absent: { - exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" } - exec { "remove_module_${name}": - command => $operatingsystem ? { - ubuntu => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'", - redhat => "/usr/bin/perl -ni -e 'print unless /^\\Q/sbin/modprobe ${name}\\E\$/' '${modulesfile}'" - }, - onlyif => $operatingsystem ? 
{ - ubuntu => "/bin/grep -qFx '${name}' '${modulesfile}'", - redhat => "/bin/grep -q '^/sbin/modprobe ${name}' '${modulesfile}'" - } - } - } - default: { err ( "unknown ensure value ${ensure}" ) } - } -} diff --git a/contrib/puppet/manifests/classes/loopback.pp b/contrib/puppet/manifests/classes/loopback.pp deleted file mode 100644 index e0fa9d541..000000000 --- a/contrib/puppet/manifests/classes/loopback.pp +++ /dev/null @@ -1,6 +0,0 @@ -define loopback($num) { - exec { "mknod -m 0660 /dev/loop${num} b 7 ${num}; chown root:disk /dev/loop${num}": - creates => "/dev/loop${num}", - path => ["/usr/bin", "/usr/sbin", "/bin"] - } -} diff --git a/contrib/puppet/manifests/classes/lvm.pp b/contrib/puppet/manifests/classes/lvm.pp deleted file mode 100644 index 5a407abcb..000000000 --- a/contrib/puppet/manifests/classes/lvm.pp +++ /dev/null @@ -1,8 +0,0 @@ -class lvm { - file { "/etc/lvm/lvm.conf": - owner => "root", - group => "root", - mode => 444, - source => "puppet://${puppet_server}/files/etc/lvm.conf", - } -} diff --git a/contrib/puppet/manifests/classes/lvmconf.pp b/contrib/puppet/manifests/classes/lvmconf.pp deleted file mode 100644 index 4aa7ddfdc..000000000 --- a/contrib/puppet/manifests/classes/lvmconf.pp +++ /dev/null @@ -1,8 +0,0 @@ -class lvmconf { - file { "/etc/lvm/lvm.conf": - owner => "root", group => "root", mode => 644, - source => "puppet://${puppet_server}/files/etc/lvm/lvm.conf", - ensure => present - } -} - diff --git a/contrib/puppet/manifests/classes/nova.pp b/contrib/puppet/manifests/classes/nova.pp deleted file mode 100644 index e942860f4..000000000 --- a/contrib/puppet/manifests/classes/nova.pp +++ /dev/null @@ -1,464 +0,0 @@ -import "kern_module" -import "apt" -import "loopback" - -#$head_node_ip = "undef" -#$rabbit_ip = "undef" -#$vpn_ip = "undef" -#$public_interface = "undef" -#$vlan_start = "5000" -#$vlan_end = "6000" -#$private_range = "10.0.0.0/16" -#$public_range = "192.168.177.0/24" - -define nova_iptables($services, $ip="", 
$private_range="", $mgmt_ip="", $dmz_ip="") { - file { "/etc/init.d/nova-iptables": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/nova-iptables", - } - - file { "/etc/default/nova-iptables": - owner => "root", mode => 644, - content => template("nova-iptables.erb") - } -} - -define nova_conf_pointer($name) { - file { "/etc/nova/nova-${name}.conf": - owner => "nova", mode => 400, - content => "--flagfile=/etc/nova/nova.conf" - } -} - -class novaconf { - file { "/etc/nova/nova.conf": - owner => "nova", mode => 400, - content => template("production/nova-common.conf.erb", "production/nova-${cluster_name}.conf.erb") - } - nova_conf_pointer{'manage': name => 'manage'} -} - -class novadata { - package { "rabbitmq-server": ensure => present } - - file { "/etc/rabbitmq/rabbitmq.conf": - owner => "root", mode => 644, - content => "NODENAME=rabbit@localhost", - } - - service { "rabbitmq-server": - ensure => running, - enable => true, - hasstatus => true, - require => [ - File["/etc/rabbitmq/rabbitmq.conf"], - Package["rabbitmq-server"] - ] - } - - package { "mysql-server": ensure => present } - - file { "/etc/mysql/my.cnf": - owner => "root", mode => 644, - source => "puppet://${puppet_server}/files/production/my.cnf", - } - - service { "mysql": - ensure => running, - enable => true, - hasstatus => true, - require => [ - File["/etc/mysql/my.cnf"], - Package["mysql-server"] - ] - } - - file { "/root/slap.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/slap.sh", - } - - file { "/root/setup_data.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/setup_data.sh", - } - - # setup compute data - exec { "setup_data": - command => "/root/setup_data.sh", - path => "/usr/bin:/bin", - unless => "test -f /root/installed", - require => [ - Service["mysql"], - File["/root/slap.sh"], - File["/root/setup_data.sh"] - ] - } -} - -define nscheduler($version) { - 
package { "nova-scheduler": ensure => $version, require => Exec["update-apt"] } - nova_conf_pointer{'scheduler': name => 'scheduler'} - exec { "update-rc.d -f nova-scheduler remove; update-rc.d nova-scheduler defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-scheduler", - unless => "test -f /etc/rc2.d/S50nova-scheduler" - } - service { "nova-scheduler": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-scheduler"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-scheduler.conf"] - ] - } - -} - -define napi($version, $api_servers, $api_base_port) { - file { "/etc/boto.cfg": - owner => "root", mode => 644, - source => "puppet://${puppet_server}/files/production/boto.cfg", - } - - file { "/var/lib/nova/CA/genvpn.sh": - owner => "nova", mode => 755, - source => "puppet://${puppet_server}/files/production/genvpn.sh", - } - - package { "python-greenlet": ensure => present } - package { "nova-api": ensure => $version, require => [Exec["update-apt"], Package["python-greenlet"]] } - nova_conf_pointer{'api': name => 'api'} - - exec { "update-rc.d -f nova-api remove; update-rc.d nova-api defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-api", - unless => "test -f /etc/rc2.d/S50nova-api" - } - - service { "nova-netsync": - start => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock start", - stop => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock stop", - ensure => running, - hasstatus => false, - pattern => "nova-netsync", - require => Service["nova-api"], - subscribe => File["/etc/nova/nova.conf"] - } - service { "nova-api": - start => "monit start all -g nova_api", - stop => "monit stop all -g nova_api", - restart => "monit restart all -g nova_api", - # ensure => running, - # hasstatus => true, - require => Service["monit"], - subscribe => [ - 
Package["nova-objectstore"], - File["/etc/boto.cfg"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-objectstore.conf"] - ] - } - - # the haproxy & monit's template use $api_servers and $api_base_port - - package { "haproxy": ensure => present } - file { "/etc/default/haproxy": - owner => "root", mode => 644, - content => "ENABLED=1", - require => Package['haproxy'] - } - file { "/etc/haproxy/haproxy.cfg": - owner => "root", mode => 644, - content => template("/srv/cloud/puppet/templates/haproxy.cfg.erb"), - require => Package['haproxy'] - } - service { "haproxy": - ensure => true, - enable => true, - hasstatus => true, - subscribe => [ - Package["haproxy"], - File["/etc/default/haproxy"], - File["/etc/haproxy/haproxy.cfg"], - ] - } - - package { "socat": ensure => present } - - file { "/usr/local/bin/gmetric_haproxy.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/ganglia/gmetric_scripts/gmetric_haproxy.sh", - } - - cron { "gmetric_haproxy": - command => "/usr/local/bin/gmetric_haproxy.sh", - user => root, - minute => "*/3", - } - - package { "monit": ensure => present } - - file { "/etc/default/monit": - owner => "root", mode => 644, - content => "startup=1", - require => Package['monit'] - } - file { "/etc/monit/monitrc": - owner => "root", mode => 600, - content => template("/srv/cloud/puppet/templates/monitrc-nova-api.erb"), - require => Package['monit'] - } - service { "monit": - ensure => true, - pattern => "sbin/monit", - subscribe => [ - Package["monit"], - File["/etc/default/monit"], - File["/etc/monit/monitrc"], - ] - } - -} - - -define nnetwork($version) { - # kill the default network added by the package - exec { "kill-libvirt-default-net": - command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml", - path => "/usr/bin:/bin", - onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml" - } - - # EVIL HACK: custom binary because dnsmasq 2.52 segfaulted 
accessing dereferenced object - file { "/usr/sbin/dnsmasq": - owner => "root", group => "root", - source => "puppet://${puppet_server}/files/production/dnsmasq", - } - - package { "nova-network": ensure => $version, require => Exec["update-apt"] } - nova_conf_pointer{'dhcpbridge': name => 'dhcpbridge'} - nova_conf_pointer{'network': name => "network" } - - exec { "update-rc.d -f nova-network remove; update-rc.d nova-network defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-network", - unless => "test -f /etc/rc2.d/S50nova-network" - } - service { "nova-network": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-network"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-network.conf"] - ] - } -} - -define nobjectstore($version) { - package { "nova-objectstore": ensure => $version, require => Exec["update-apt"] } - nova_conf_pointer{'objectstore': name => 'objectstore'} - exec { "update-rc.d -f nova-objectstore remove; update-rc.d nova-objectstore defaults 50": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/init.d/nova-objectstore", - unless => "test -f /etc/rc2.d/S50nova-objectstore" - } - service { "nova-objectstore": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-objectstore"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-objectstore.conf"] - ] - } -} - -define ncompute($version) { - include ganglia-python - include ganglia-compute - - # kill the default network added by the package - exec { "kill-libvirt-default-net": - command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml", - path => "/usr/bin:/bin", - onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml" - } - - - # LIBVIRT has to be restarted when ebtables / gawk is installed - service { "libvirt-bin": - ensure => running, - pattern => "sbin/libvirtd", - subscribe => [ - Package["ebtables"], - Kern_module["kvm_intel"] - ], - require => [ 
- Package["libvirt-bin"], - Package["ebtables"], - Package["gawk"], - Kern_module["kvm_intel"], - File["/dev/kvm"] - ] - } - - package { "libvirt-bin": ensure => "0.8.3-1ubuntu14~ppalucid2" } - package { "ebtables": ensure => present } - package { "gawk": ensure => present } - - # ensure proper permissions on /dev/kvm - file { "/dev/kvm": - owner => "root", - group => "kvm", - mode => 660 - } - - # require hardware virt - kern_module { "kvm_intel": - ensure => present, - } - - # increase loopback devices - file { "/etc/modprobe.d/loop.conf": - owner => "root", mode => 644, - content => "options loop max_loop=40" - } - - nova_conf_pointer{'compute': name => 'compute'} - - loopback{loop0: num => 0} - loopback{loop1: num => 1} - loopback{loop2: num => 2} - loopback{loop3: num => 3} - loopback{loop4: num => 4} - loopback{loop5: num => 5} - loopback{loop6: num => 6} - loopback{loop7: num => 7} - loopback{loop8: num => 8} - loopback{loop9: num => 9} - loopback{loop10: num => 10} - loopback{loop11: num => 11} - loopback{loop12: num => 12} - loopback{loop13: num => 13} - loopback{loop14: num => 14} - loopback{loop15: num => 15} - loopback{loop16: num => 16} - loopback{loop17: num => 17} - loopback{loop18: num => 18} - loopback{loop19: num => 19} - loopback{loop20: num => 20} - loopback{loop21: num => 21} - loopback{loop22: num => 22} - loopback{loop23: num => 23} - loopback{loop24: num => 24} - loopback{loop25: num => 25} - loopback{loop26: num => 26} - loopback{loop27: num => 27} - loopback{loop28: num => 28} - loopback{loop29: num => 29} - loopback{loop30: num => 30} - loopback{loop31: num => 31} - loopback{loop32: num => 32} - loopback{loop33: num => 33} - loopback{loop34: num => 34} - loopback{loop35: num => 35} - loopback{loop36: num => 36} - loopback{loop37: num => 37} - loopback{loop38: num => 38} - loopback{loop39: num => 39} - - package { "python-libvirt": ensure => "0.8.3-1ubuntu14~ppalucid2" } - - package { "nova-compute": - ensure => "$version", - require => 
Package["python-libvirt"] - } - - #file { "/usr/share/nova/libvirt.qemu.xml.template": - # owner => "nova", mode => 400, - # source => "puppet://${puppet_server}/files/production/libvirt.qemu.xml.template", - #} - - # fix runlevels: using enable => true adds it as 20, which is too early - exec { "update-rc.d -f nova-compute remove": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/rc2.d/S??nova-compute" - } - service { "nova-compute": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-compute"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-compute.conf"], - #File["/usr/share/nova/libvirt.qemu.xml.template"], - Service["libvirt-bin"], - Kern_module["kvm_intel"] - ] - } -} - -define nvolume($version) { - - package { "nova-volume": ensure => $version, require => Exec["update-apt"] } - - nova_conf_pointer{'volume': name => 'volume'} - - # fix runlevels: using enable => true adds it as 20, which is too early - exec { "update-rc.d -f nova-volume remove": - path => "/usr/bin:/usr/sbin:/bin", - onlyif => "test -f /etc/rc2.d/S??nova-volume" - } - - file { "/etc/default/iscsitarget": - owner => "root", mode => 644, - content => "ISCSITARGET_ENABLE=true" - } - - package { "iscsitarget": ensure => present } - - file { "/dev/iscsi": ensure => directory } # FIXME(vish): owner / mode? 
- file { "/usr/sbin/nova-iscsi-dev.sh": - owner => "root", mode => 755, - source => "puppet://${puppet_server}/files/production/nova-iscsi-dev.sh" - } - file { "/etc/udev/rules.d/55-openiscsi.rules": - owner => "root", mode => 644, - content => 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/usr/sbin/nova-iscsi-dev.sh %b",SYMLINK+="iscsi/%c%n"' - } - - service { "iscsitarget": - ensure => running, - enable => true, - hasstatus => true, - require => [ - File["/etc/default/iscsitarget"], - Package["iscsitarget"] - ] - } - - service { "nova-volume": - ensure => running, - hasstatus => true, - subscribe => [ - Package["nova-volume"], - File["/etc/nova/nova.conf"], - File["/etc/nova/nova-volume.conf"] - ] - } -} - -class novaspool { - # This isn't in release yet - #cron { logspool: - # command => "/usr/bin/nova-logspool /var/log/nova.log /var/lib/nova/spool", - # user => "nova" - #} - #cron { spoolsentry: - # command => "/usr/bin/nova-spoolsentry ${sentry_url} ${sentry_key} /var/lib/nova/spool", - # user => "nova" - #} -} diff --git a/contrib/puppet/manifests/classes/swift.pp b/contrib/puppet/manifests/classes/swift.pp deleted file mode 100644 index 64ffb6fa3..000000000 --- a/contrib/puppet/manifests/classes/swift.pp +++ /dev/null @@ -1,7 +0,0 @@ -class swift { - package { "memcached": ensure => present } - service { "memcached": require => Package['memcached'] } - - package { "swift-proxy": ensure => present } -} - diff --git a/contrib/puppet/manifests/site.pp b/contrib/puppet/manifests/site.pp deleted file mode 100644 index ca07a34ad..000000000 --- a/contrib/puppet/manifests/site.pp +++ /dev/null @@ -1,120 +0,0 @@ -# site.pp - -import "templates" -import "classes/*" - -node novabase inherits default { -# $puppet_server = "192.168.0.10" - $cluster_name = "openstack001" - $ganglia_udp_send_channel = "openstack001.example.com" - $syslog = "192.168.0.10" - - # THIS STUFF ISN'T IN RELEASE YET - #$sentry_url = "http://192.168.0.19/sentry/store/" - #$sentry_key = "TODO:SENTRYPASS" - 
- $local_network = "192.168.0.0/16" - $vpn_ip = "192.168.0.2" - $public_interface = "eth0" - include novanode -# include nova-common - include opsmetrics - -# non-nova stuff such as nova-dash inherit from novanode -# novaspool needs a better home -# include novaspool -} - -# Builder -node "nova000.example.com" inherits novabase { - $syslog = "server" - include ntp - include syslog-server -} - -# Non-Nova nodes - -node - "blog.example.com", - "wiki.example.com" -inherits novabase { - include ganglia-python - include ganglia-apache - include ganglia-mysql -} - - -node "nova001.example.com" -inherits novabase { - include novabase - - nova_iptables { nova: - services => [ - "ganglia", - "mysql", - "rabbitmq", - "ldap", - "api", - "objectstore", - "nrpe", - ], - ip => "192.168.0.10", - } - - nobjectstore { nova: version => "0.9.0" } - nscheduler { nova: version => "0.9.0" } - napi { nova: - version => "0.9.0", - api_servers => 10, - api_base_port => 8000 - } -} - -node "nova002.example.com" -inherits novabase { - include novaconf - - nova_iptables { nova: - services => [ - "ganglia", - "dnsmasq", - "nrpe" - ], - ip => "192.168.4.2", - private_range => "192.168.0.0/16", - } - - nnetwork { nova: version => "0.9.0" } -} - -node - "nova003.example.com", - "nova004.example.com", - "nova005.example.com", - "nova006.example.com", - "nova007.example.com", - "nova008.example.com", - "nova009.example.com", - "nova010.example.com", - "nova011.example.com", - "nova012.example.com", - "nova013.example.com", - "nova014.example.com", - "nova015.example.com", - "nova016.example.com", - "nova017.example.com", - "nova018.example.com", - "nova019.example.com", -inherits novabase { - include novaconf - ncompute { nova: version => "0.9.0" } - nvolume { nova: version => "0.9.0" } -} - -#node -# "nova020.example.com" -# "nova021.example.com" -#inherits novanode { -# include novaconf - #ncompute { nova: version => "0.9.0" } -#} diff --git a/contrib/puppet/manifests/templates.pp 
b/contrib/puppet/manifests/templates.pp deleted file mode 100644 index 90e433013..000000000 --- a/contrib/puppet/manifests/templates.pp +++ /dev/null @@ -1,21 +0,0 @@ -# templates.pp - -import "classes/*" - -class baseclass { -# include dns-client # FIXME: missing resolv.conf.erb?? - include issue -} - -node default { - $nova_site = "undef" - $nova_ns1 = "undef" - $nova_ns2 = "undef" -# include baseclass -} - -# novanode handles the system-level requirements for Nova/Swift nodes -class novanode { - include baseclass - include lvmconf -} diff --git a/contrib/puppet/puppet.conf b/contrib/puppet/puppet.conf deleted file mode 100644 index 92af920e3..000000000 --- a/contrib/puppet/puppet.conf +++ /dev/null @@ -1,11 +0,0 @@ -[main] -logdir=/var/log/puppet -vardir=/var/lib/puppet -ssldir=/var/lib/puppet/ssl -rundir=/var/run/puppet -factpath=$vardir/lib/facter -pluginsync=false - -[puppetmasterd] -templatedir=/var/lib/nova/contrib/puppet/templates -autosign=true diff --git a/contrib/puppet/templates/haproxy.cfg.erb b/contrib/puppet/templates/haproxy.cfg.erb deleted file mode 100644 index bd9991de7..000000000 --- a/contrib/puppet/templates/haproxy.cfg.erb +++ /dev/null @@ -1,39 +0,0 @@ -# this config needs haproxy-1.1.28 or haproxy-1.2.1 - -global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice - #log loghost local0 info - maxconn 4096 - #chroot /usr/share/haproxy - stats socket /var/run/haproxy.sock - user haproxy - group haproxy - daemon - #debug - #quiet - -defaults - log global - mode http - option httplog - option dontlognull - retries 3 - option redispatch - stats enable - stats uri /haproxy - maxconn 2000 - contimeout 5000 - clitimeout 50000 - srvtimeout 50000 - - -listen nova-api 0.0.0.0:8773 - option httpchk GET / HTTP/1.0\r\nHost:\ example.com - option forwardfor - reqidel ^X-Forwarded-For:.* - balance roundrobin -<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset -%> - server api_<%= port %> 127.0.0.1:<%= port %> maxconn 1 check 
-<% end -%> - option httpclose # disable keep-alive diff --git a/contrib/puppet/templates/monitrc-nova-api.erb b/contrib/puppet/templates/monitrc-nova-api.erb deleted file mode 100644 index fe2626327..000000000 --- a/contrib/puppet/templates/monitrc-nova-api.erb +++ /dev/null @@ -1,138 +0,0 @@ -############################################################################### -## Monit control file -############################################################################### -## -## Comments begin with a '#' and extend through the end of the line. Keywords -## are case insensitive. All path's MUST BE FULLY QUALIFIED, starting with '/'. -## -## Below you will find examples of some frequently used statements. For -## information about the control file, a complete list of statements and -## options please have a look in the monit manual. -## -## -############################################################################### -## Global section -############################################################################### -## -## Start monit in the background (run as a daemon): -# -set daemon 60 # check services at 1-minute intervals - with start delay 30 # optional: delay the first check by half a minute - # (by default check immediately after monit start) - - -## Set syslog logging with the 'daemon' facility. If the FACILITY option is -## omitted, monit will use 'user' facility by default. If you want to log to -## a stand alone log file instead, specify the path to a log file -# -set logfile syslog facility log_daemon -# -# -### Set the location of monit id file which saves the unique id specific for -### given monit. The id is generated and stored on first monit start. -### By default the file is placed in $HOME/.monit.id. -# -# set idfile /var/.monit.id -# -### Set the location of monit state file which saves the monitoring state -### on each cycle. By default the file is placed in $HOME/.monit.state. 
If -### state file is stored on persistent filesystem, monit will recover the -### monitoring state across reboots. If it is on temporary filesystem, the -### state will be lost on reboot. -# -# set statefile /var/.monit.state -# -## Set the list of mail servers for alert delivery. Multiple servers may be -## specified using comma separator. By default monit uses port 25 - this -## is possible to override with the PORT option. -# -# set mailserver mail.bar.baz, # primary mailserver -# backup.bar.baz port 10025, # backup mailserver on port 10025 -# localhost # fallback relay -# -# -## By default monit will drop alert events if no mail servers are available. -## If you want to keep the alerts for a later delivery retry, you can use the -## EVENTQUEUE statement. The base directory where undelivered alerts will be -## stored is specified by the BASEDIR option. You can limit the maximal queue -## size using the SLOTS option (if omitted, the queue is limited by space -## available in the back end filesystem). -# -# set eventqueue -# basedir /var/monit # set the base directory where events will be stored -# slots 100 # optionaly limit the queue size -# -# -## Send status and events to M/Monit (Monit central management: for more -## informations about M/Monit see http://www.tildeslash.com/mmonit). -# -# set mmonit http://monit:monit@192.168.1.10:8080/collector -# -# -## Monit by default uses the following alert mail format: -## -## --8<-- -## From: monit@$HOST # sender -## Subject: monit alert -- $EVENT $SERVICE # subject -## -## $EVENT Service $SERVICE # -## # -## Date: $DATE # -## Action: $ACTION # -## Host: $HOST # body -## Description: $DESCRIPTION # -## # -## Your faithful employee, # -## monit # -## --8<-- -## -## You can override this message format or parts of it, such as subject -## or sender using the MAIL-FORMAT statement. Macros such as $DATE, etc. -## are expanded at runtime. 
For example, to override the sender: -# -# set mail-format { from: monit@foo.bar } -# -# -## You can set alert recipients here whom will receive alerts if/when a -## service defined in this file has errors. Alerts may be restricted on -## events by using a filter as in the second example below. -# -# set alert sysadm@foo.bar # receive all alerts -# set alert manager@foo.bar only on { timeout } # receive just service- -# # timeout alert -# -# -## Monit has an embedded web server which can be used to view status of -## services monitored, the current configuration, actual services parameters -## and manage services from a web interface. -# - set httpd port 2812 and - use address localhost # only accept connection from localhost - allow localhost # allow localhost to connect to the server and -# allow admin:monit # require user 'admin' with password 'monit' -# allow @monit # allow users of group 'monit' to connect (rw) -# allow @users readonly # allow users of group 'users' to connect readonly -# -# -############################################################################### -## Services -############################################################################### - -<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset %> - -check process nova_api_<%= port %> with pidfile /var/run/nova/nova-api-<%= port %>.pid - group nova_api - start program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock start" - as uid nova - stop program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock stop" - as uid nova - if failed port <%= port %> protocol http - with timeout 15 seconds - for 4 cycles - then restart - if totalmem > 300 Mb then restart - if cpu is greater than 60% for 2 cycles 
then alert - if cpu > 80% for 3 cycles then restart - if 3 restarts within 5 cycles then timeout - -<% end %> diff --git a/contrib/puppet/templates/nova-iptables.erb b/contrib/puppet/templates/nova-iptables.erb deleted file mode 100644 index 2fc066305..000000000 --- a/contrib/puppet/templates/nova-iptables.erb +++ /dev/null @@ -1,10 +0,0 @@ -<% services.each do |service| -%> -<%= service.upcase %>=1 -<% end -%> -<% if ip && ip != "" %>IP="<%=ip%>"<% end %> -<% if private_range && private_range != "" %>PRIVATE_RANGE="<%=private_range%>"<% end %> -<% if mgmt_ip && mgmt_ip != "" %>MGMT_IP="<%=mgmt_ip%>"<% end %> -<% if dmz_ip && dmz_ip != "" %>DMZ_IP="<%=dmz_ip%>"<% end %> - -# warning: this file is auto-generated by puppet - diff --git a/contrib/puppet/templates/production/nova-common.conf.erb b/contrib/puppet/templates/production/nova-common.conf.erb deleted file mode 100644 index 23ee0c5e8..000000000 --- a/contrib/puppet/templates/production/nova-common.conf.erb +++ /dev/null @@ -1,55 +0,0 @@ -# global ---dmz_net=192.168.0.0 ---dmz_mask=255.255.0.0 ---dmz_cidr=192.168.0.0/16 ---ldap_user_dn=cn=Administrators,dc=example,dc=com ---ldap_user_unit=Users ---ldap_user_subtree=ou=Users,dc=example,dc=com ---ldap_project_subtree=ou=Groups,dc=example,dc=com ---role_project_subtree=ou=Groups,dc=example,dc=com ---ldap_cloudadmin=cn=NovaAdmins,ou=Groups,dc=example,dc=com ---ldap_itsec=cn=NovaSecurity,ou=Groups,dc=example,dc=com ---ldap_sysadmin=cn=Administrators,ou=Groups,dc=example,dc=com ---ldap_netadmin=cn=Administrators,ou=Groups,dc=example,dc=com ---ldap_developer=cn=developers,ou=Groups,dc=example,dc=com ---verbose ---daemonize ---syslog ---networks_path=/var/lib/nova/networks ---instances_path=/var/lib/nova/instances ---buckets_path=/var/lib/nova/objectstore/buckets ---images_path=/var/lib/nova/objectstore/images ---scheduler_driver=nova.scheduler.simple.SimpleScheduler ---libvirt_xml_template=/usr/share/nova/libvirt.qemu.xml.template 
---credentials_template=/usr/share/nova/novarc.template ---boot_script_template=/usr/share/nova/bootscript.template ---vpn_client_template=/usr/share/nova/client.ovpn.template ---max_cores=40 ---max_gigabytes=2000 ---ca_path=/var/lib/nova/CA ---keys_path=/var/lib/nova/keys ---vpn_start=11000 ---volume_group=vgdata ---volume_manager=nova.volume.manager.ISCSIManager ---volume_driver=nova.volume.driver.ISCSIDriver ---default_kernel=aki-DEFAULT ---default_ramdisk=ari-DEFAULT ---dhcpbridge=/usr/bin/nova-dhcpbridge ---vpn_image_id=ami-cloudpipe ---dhcpbridge_flagfile=/etc/nova/nova.conf ---credential_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=NOVA/CN=%s-%s ---auth_driver=nova.auth.ldapdriver.LdapDriver ---quota_cores=17 ---quota_floating_ips=5 ---quota_instances=6 ---quota_volumes=10 ---quota_gigabytes=100 ---use_nova_chains=True ---input_chain=services ---use_project_ca=True ---fixed_ip_disassociate_timeout=300 ---api_max_requests=1 ---api_listen_ip=127.0.0.1 ---user_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=%s-%s-%s ---project_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-ca-%s-%s ---vpn_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-vpn-%s-%s diff --git a/contrib/puppet/templates/production/nova-nova.conf.erb b/contrib/puppet/templates/production/nova-nova.conf.erb deleted file mode 100644 index 8683fefde..000000000 --- a/contrib/puppet/templates/production/nova-nova.conf.erb +++ /dev/null @@ -1,21 +0,0 @@ ---fixed_range=192.168.0.0/16 ---iscsi_ip_prefix=192.168.4 ---floating_range=10.0.0.0/24 ---rabbit_host=192.168.0.10 ---s3_host=192.168.0.10 ---cc_host=192.168.0.10 ---cc_dmz=192.168.24.10 ---s3_dmz=192.168.24.10 ---ec2_url=http://192.168.0.1:8773/services/Cloud ---vpn_ip=192.168.0.2 ---ldap_url=ldap://192.168.0.10 ---sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova ---other_sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova ---routing_source_ip=192.168.0.2 ---bridge_dev=eth1 
---public_interface=eth0 ---vlan_start=3100 ---num_networks=700 ---rabbit_userid=TODO:RABBIT ---rabbit_password=TODO:CHANGEME ---ldap_password=TODO:CHANGEME -- cgit From 106eb47eff0551c73b2e90385b9c3a910fec8633 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 20 Feb 2011 23:16:10 -0800 Subject: fixes for various logging errors and issues --- nova/api/openstack/auth.py | 1 - nova/api/openstack/backup_schedules.py | 1 - nova/api/openstack/images.py | 2 - nova/api/openstack/servers.py | 1 - nova/api/openstack/shared_ip_groups.py | 2 - nova/api/openstack/zones.py | 1 - nova/console/manager.py | 2 +- nova/console/xvp.py | 2 +- nova/flags.py | 4 ++ nova/log.py | 117 ++++++++++++++++++++------------- nova/test.py | 2 + nova/tests/fake_flags.py | 2 + nova/tests/test_auth.py | 9 --- nova/tests/test_console.py | 2 - nova/tests/test_direct.py | 1 - nova/tests/test_localization.py | 1 - nova/tests/test_log.py | 65 +++++++++--------- nova/utils.py | 2 +- run_tests.py | 5 +- 19 files changed, 117 insertions(+), 105 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 473071738..1dfdd5318 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -19,7 +19,6 @@ import datetime import hashlib import json import time -import logging import webob.exc import webob.dec diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 197125d86..7abb5f884 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging import time from webob import exc diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 9d56bc508..cf85a496f 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -import logging - from webob import exc from nova import compute diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ce9601ecb..0bac4c64d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -33,7 +33,6 @@ import nova.api.openstack LOG = logging.getLogger('server') -LOG.setLevel(logging.DEBUG) FLAGS = flags.FLAGS diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index bd3cc23a8..5d78f9377 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -import logging - from webob import exc from nova import wsgi diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 830464ffd..d5206da20 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -14,7 +14,6 @@ # under the License. import common -import logging from nova import flags from nova import wsgi diff --git a/nova/console/manager.py b/nova/console/manager.py index 5697e7cb1..57c75cf4f 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -20,11 +20,11 @@ Console Proxy Service """ import functools -import logging import socket from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils diff --git a/nova/console/xvp.py b/nova/console/xvp.py index ee66dac46..cd257e0a6 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -20,7 +20,6 @@ XVP (Xenserver VNC Proxy) driver. 
""" import fcntl -import logging import os import signal import subprocess @@ -31,6 +30,7 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils flags.DEFINE_string('console_xvp_conf_template', diff --git a/nova/flags.py b/nova/flags.py index f64a62da9..2f3bdd675 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -38,6 +38,7 @@ class FlagValues(gflags.FlagValues): defined after the initial parsing. """ + initialized = False def __init__(self, extra_context=None): gflags.FlagValues.__init__(self) @@ -45,6 +46,8 @@ class FlagValues(gflags.FlagValues): self.__dict__['__was_already_parsed'] = False self.__dict__['__stored_argv'] = [] self.__dict__['__extra_context'] = extra_context + # NOTE(vish): force a pseudo flag to keep track of whether + # flags have been parsed already def __call__(self, argv): # We're doing some hacky stuff here so that we don't have to copy @@ -90,6 +93,7 @@ class FlagValues(gflags.FlagValues): self.__dict__['__stored_argv'] = original_argv self.__dict__['__was_already_parsed'] = True self.ClearDirty() + FlagValues.initialized = True return args def Reset(self): diff --git a/nova/log.py b/nova/log.py index 6b201ffcc..12b695a41 100644 --- a/nova/log.py +++ b/nova/log.py @@ -117,7 +117,22 @@ def _get_binary_name(): return os.path.basename(inspect.stack()[-1][1]) -def get_log_file_path(binary=None): +def _get_level_from_flags(name): + # if exactly "nova", or a child logger, honor the verbose flag + if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose: + return 'DEBUG' + for pair in FLAGS.default_log_levels: + logger, _sep, level = pair.partition('=') + # NOTE(todd): if we set a.b, we want a.b.c to have the same level + # (but not a.bc, so we check the dot) + if name == logger: + return level + if name.startswith(logger) and name[len(logger)] == '.': + return level + return 'INFO' + + +def _get_log_file_path(binary=None): if 
FLAGS.logfile: return FLAGS.logfile if FLAGS.logdir: @@ -126,22 +141,13 @@ def get_log_file_path(binary=None): def basicConfig(): - logging.basicConfig() - for handler in logging.root.handlers: - handler.setFormatter(_formatter) - if FLAGS.verbose: - logging.root.setLevel(logging.DEBUG) - else: - logging.root.setLevel(logging.INFO) - if FLAGS.use_syslog: - syslog = SysLogHandler(address='/dev/log') - syslog.setFormatter(_formatter) - logging.root.addHandler(syslog) - logpath = get_log_file_path() - if logpath: - logfile = WatchedFileHandler(logpath) - logfile.setFormatter(_formatter) - logging.root.addHandler(logfile) + pass + + +logging.basicConfig = basicConfig +_syslog = SysLogHandler(address='/dev/log') +_filelog = None +_streamlog = StreamHandler() class NovaLogger(logging.Logger): @@ -151,23 +157,24 @@ class NovaLogger(logging.Logger): This becomes the class that is instanciated by logging.getLogger. """ def __init__(self, name, level=NOTSET): - level_name = self._get_level_from_flags(name, FLAGS) - level = globals()[level_name] logging.Logger.__init__(self, name, level) - - def _get_level_from_flags(self, name, FLAGS): - # if exactly "nova", or a child logger, honor the verbose flag - if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose: - return 'DEBUG' - for pair in FLAGS.default_log_levels: - logger, _sep, level = pair.partition('=') - # NOTE(todd): if we set a.b, we want a.b.c to have the same level - # (but not a.bc, so we check the dot) - if name == logger: - return level - if name.startswith(logger) and name[len(logger)] == '.': - return level - return 'INFO' + self.initialized = False + if flags.FlagValues.initialized: + self._setup_from_flags() + + def _setup_from_flags(self): + """Setup logger from flags""" + level_name = _get_level_from_flags(self.name) + self.setLevel(globals()[level_name]) + self.initialized = True + if not logging.root.initialized: + logging.root._setup_from_flags() + + def isEnabledFor(self, level): + """Reset 
level after flags have been loaded""" + if not self.initialized and flags.FlagValues.initialized: + self._setup_from_flags() + return logging.Logger.isEnabledFor(self, level) def _log(self, level, msg, args, exc_info=None, extra=None, context=None): """Extract context from any log call""" @@ -176,12 +183,12 @@ class NovaLogger(logging.Logger): if context: extra.update(_dictify_context(context)) extra.update({"nova_version": version.version_string_with_vcs()}) - logging.Logger._log(self, level, msg, args, exc_info, extra) + return logging.Logger._log(self, level, msg, args, exc_info, extra) def addHandler(self, handler): """Each handler gets our custom formatter""" handler.setFormatter(_formatter) - logging.Logger.addHandler(self, handler) + return logging.Logger.addHandler(self, handler) def audit(self, msg, *args, **kwargs): """Shortcut for our AUDIT level""" @@ -192,7 +199,7 @@ class NovaLogger(logging.Logger): """Logging.exception doesn't handle kwargs, so breaks context""" if not kwargs.get('exc_info'): kwargs['exc_info'] = 1 - self.error(msg, *args, **kwargs) + return self.error(msg, *args, **kwargs) # NOTE(todd): does this really go here, or in _log ? extra = kwargs.get('extra') if not extra: @@ -209,6 +216,8 @@ class NovaLogger(logging.Logger): def handle_exception(type, value, tb): + if len(logging.root.handlers) == 0: + logging.root.addHandler(_streamlog) logging.root.critical(str(value), exc_info=(type, value, tb)) @@ -216,15 +225,6 @@ sys.excepthook = handle_exception logging.setLoggerClass(NovaLogger) -class NovaRootLogger(NovaLogger): - pass - -if not isinstance(logging.root, NovaRootLogger): - logging.root = NovaRootLogger("nova.root", WARNING) - NovaLogger.root = logging.root - NovaLogger.manager.root = logging.root - - class NovaFormatter(logging.Formatter): """ A nova.context.RequestContext aware formatter configured through flags. 
@@ -271,6 +271,33 @@ class NovaFormatter(logging.Formatter): _formatter = NovaFormatter() +class NovaRootLogger(NovaLogger): + def __init__(self, name, level=NOTSET): + NovaLogger.__init__(self, name, level) + self.addHandler(_streamlog) + + def _setup_from_flags(self): + """Setup logger from flags""" + global _filelog + if FLAGS.use_syslog: + self.addHandler(_syslog) + logpath = _get_log_file_path() + if logpath: + if not _filelog: + _filelog = WatchedFileHandler(logpath) + self.addHandler(_filelog) + self.removeHandler(_streamlog) + return NovaLogger._setup_from_flags(self) + + +if not isinstance(logging.root, NovaRootLogger): + for handler in logging.root.handlers: + logging.root.removeHandler(handler) + logging.root = NovaRootLogger("nova") + NovaLogger.root = logging.root + NovaLogger.manager.root = logging.root + + def audit(msg, *args, **kwargs): """Shortcut for logging to root log with sevrity 'AUDIT'.""" if len(logging.root.handlers) == 0: diff --git a/nova/test.py b/nova/test.py index a12cf9d32..a649b4c5f 100644 --- a/nova/test.py +++ b/nova/test.py @@ -60,6 +60,8 @@ class TestCase(unittest.TestCase): def setUp(self): """Run before each test method to initialize test environment""" super(TestCase, self).setUp() + # NOTE(vish): pretend like we've loaded flags from command line + flags.FlagValues.initialized = True # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. 
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 1097488ec..68b14a46e 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -41,3 +41,5 @@ FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///nova.sqlite' FLAGS.use_ipv6 = True +FLAGS.logfile = 'run_tests.err' +flags.FlagValues.initialized = True diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 35ffffb67..2a7817032 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -327,15 +327,6 @@ class AuthManagerTestCase(object): class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' - def __init__(self, *args, **kwargs): - AuthManagerTestCase.__init__(self) - test.TestCase.__init__(self, *args, **kwargs) - import nova.auth.fakeldap as fakeldap - if FLAGS.flush_db: - LOG.info("Flushing datastore") - r = fakeldap.Store.instance() - r.flushdb() - class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): auth_driver = 'nova.auth.dbdriver.DbDriver' diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 85bf94458..49ff24413 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -21,7 +21,6 @@ Tests For Console proxy. 
""" import datetime -import logging from nova import context from nova import db @@ -38,7 +37,6 @@ FLAGS = flags.FLAGS class ConsoleTestCase(test.TestCase): """Test case for console proxy""" def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(ConsoleTestCase, self).setUp() self.flags(console_driver='nova.console.fake.FakeConsoleProxy', stub_compute=True) diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 8a74b2296..7656f5396 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -19,7 +19,6 @@ """Tests for Direct API.""" import json -import logging import webob diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py index 6992773f5..393d71038 100644 --- a/nova/tests/test_localization.py +++ b/nova/tests/test_localization.py @@ -15,7 +15,6 @@ # under the License. import glob -import logging import os import re import sys diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py index c2c9d7772..ada8d0a56 100644 --- a/nova/tests/test_log.py +++ b/nova/tests/test_log.py @@ -1,9 +1,12 @@ import cStringIO from nova import context +from nova import flags from nova import log from nova import test +FLAGS = flags.FLAGS + def _fake_context(): return context.RequestContext(1, 1) @@ -14,15 +17,11 @@ class RootLoggerTestCase(test.TestCase): super(RootLoggerTestCase, self).setUp() self.log = log.logging.root - def tearDown(self): - super(RootLoggerTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} - def test_is_nova_instance(self): self.assert_(isinstance(self.log, log.NovaLogger)) - def test_name_is_nova_root(self): - self.assertEqual("nova.root", self.log.name) + def test_name_is_nova(self): + self.assertEqual("nova", self.log.name) def test_handlers_have_nova_formatter(self): formatters = [] @@ -45,25 +44,38 @@ class RootLoggerTestCase(test.TestCase): log.audit("foo", context=_fake_context()) self.assert_(True) # didn't raise exception + def 
test_will_be_verbose_if_verbose_flag_set(self): + self.flags(verbose=True) + self.log.initialized = False + log.audit("foo", context=_fake_context()) + self.assertEqual(log.DEBUG, self.log.level) + + def test_will_not_be_verbose_if_verbose_flag_not_set(self): + self.flags(verbose=False) + self.log.initialized = False + log.audit("foo", context=_fake_context()) + self.assertEqual(log.INFO, self.log.level) + class LogHandlerTestCase(test.TestCase): def test_log_path_logdir(self): - self.flags(logdir='/some/path') - self.assertEquals(log.get_log_file_path(binary='foo-bar'), + self.flags(logdir='/some/path', logfile=None) + self.assertEquals(log._get_log_file_path(binary='foo-bar'), '/some/path/foo-bar.log') def test_log_path_logfile(self): self.flags(logfile='/some/path/foo-bar.log') - self.assertEquals(log.get_log_file_path(binary='foo-bar'), + self.assertEquals(log._get_log_file_path(binary='foo-bar'), '/some/path/foo-bar.log') def test_log_path_none(self): - self.assertTrue(log.get_log_file_path(binary='foo-bar') is None) + self.flags(logdir=None, logfile=None) + self.assertTrue(log._get_log_file_path(binary='foo-bar') is None) def test_log_path_logfile_overrides_logdir(self): self.flags(logdir='/some/other/path', logfile='/some/path/foo-bar.log') - self.assertEquals(log.get_log_file_path(binary='foo-bar'), + self.assertEquals(log._get_log_file_path(binary='foo-bar'), '/some/path/foo-bar.log') @@ -76,13 +88,15 @@ class NovaFormatterTestCase(test.TestCase): logging_debug_format_suffix="--DBG") self.log = log.logging.root self.stream = cStringIO.StringIO() - handler = log.StreamHandler(self.stream) - self.log.addHandler(handler) + self.handler = log.StreamHandler(self.stream) + self.log.addHandler(self.handler) + self.level = self.log.level self.log.setLevel(log.DEBUG) def tearDown(self): + self.log.setLevel(self.level) + self.log.removeHandler(self.handler) super(NovaFormatterTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} def 
test_uncontextualized_log(self): self.log.info("foo") @@ -102,30 +116,15 @@ class NovaFormatterTestCase(test.TestCase): class NovaLoggerTestCase(test.TestCase): def setUp(self): super(NovaLoggerTestCase, self).setUp() - self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False) + levels = FLAGS.default_log_levels + levels.append("nova-test=AUDIT") + self.flags(default_log_levels=levels, + verbose=True) self.log = log.getLogger('nova-test') - def tearDown(self): - super(NovaLoggerTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} - def test_has_level_from_flags(self): self.assertEqual(log.AUDIT, self.log.level) def test_child_log_has_level_of_parent_flag(self): l = log.getLogger('nova-test.foo') self.assertEqual(log.AUDIT, l.level) - - -class VerboseLoggerTestCase(test.TestCase): - def setUp(self): - super(VerboseLoggerTestCase, self).setUp() - self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True) - self.log = log.getLogger('nova.test') - - def tearDown(self): - super(VerboseLoggerTestCase, self).tearDown() - log.NovaLogger.manager.loggerDict = {} - - def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self): - self.assertEqual(log.DEBUG, self.log.level) diff --git a/nova/utils.py b/nova/utils.py index 42efa0008..c2fd5f2ee 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -55,7 +55,7 @@ def import_class(import_str): __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError), exc: - logging.debug(_('Inner Exception: %s'), exc) + LOG.info(_('Inner Exception: %s'), exc) raise exception.NotFound(_('Class %s cannot be found') % class_str) diff --git a/run_tests.py b/run_tests.py index 24786e8ad..bf12c62c6 100644 --- a/run_tests.py +++ b/run_tests.py @@ -26,8 +26,6 @@ from nose import config from nose import result from nose import core -from nova import log as logging - class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): @@ -60,7 +58,8 @@ class 
NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': - logging.basicConfig() + if os.path.exists("nova.sqlite"): + os.unlink("nova.sqlite") c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, -- cgit From 98c2ef77e6018c0a6233e32ace435fba488175f5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 20 Feb 2011 23:18:09 -0800 Subject: move the fake initialized into fake flags --- nova/test.py | 2 -- nova/tests/fake_flags.py | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/test.py b/nova/test.py index a649b4c5f..a12cf9d32 100644 --- a/nova/test.py +++ b/nova/test.py @@ -60,8 +60,6 @@ class TestCase(unittest.TestCase): def setUp(self): """Run before each test method to initialize test environment""" super(TestCase, self).setUp() - # NOTE(vish): pretend like we've loaded flags from command line - flags.FlagValues.initialized = True # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. 
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 68b14a46e..0a1011d86 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -42,4 +42,5 @@ FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///nova.sqlite' FLAGS.use_ipv6 = True FLAGS.logfile = 'run_tests.err' +# NOTE(vish): pretend like we've loaded flags from command line flags.FlagValues.initialized = True -- cgit From eaddb9ea529672549af28cab4aacd2f8243c62a3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 20 Feb 2011 23:36:36 -0800 Subject: remove extra references to logging.basicConfig --- bin/nova-api | 4 ++-- bin/nova-combined | 2 -- bin/nova-dhcpbridge | 1 - bin/nova-manage | 2 -- nova/log.py | 19 +++++++++++++------ nova/service.py | 1 - nova/twistd.py | 1 - nova/wsgi.py | 1 - 8 files changed, 15 insertions(+), 16 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 11176a021..8b3674880 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -39,7 +39,6 @@ from nova import log as logging from nova import version from nova import wsgi -logging.basicConfig() LOG = logging.getLogger('nova.api') LOG.setLevel(logging.DEBUG) @@ -71,7 +70,8 @@ def run_app(paste_config_file): return # NOTE(todd): redo logging config, verbose could be set in paste config - logging.basicConfig() + logging.root.setup_from_flags() + server = wsgi.Server() for app in apps: server.start(*app) diff --git a/bin/nova-combined b/bin/nova-combined index 913c866bf..5911d9016 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -49,7 +49,6 @@ FLAGS = flags.FLAGS if __name__ == '__main__': utils.default_flagfile() FLAGS(sys.argv) - logging.basicConfig() compute = service.Service.create(binary='nova-compute') network = service.Service.create(binary='nova-network') @@ -73,7 +72,6 @@ if __name__ == '__main__': apps.append((app, getattr(FLAGS, "%s_port" % api), getattr(FLAGS, "%s_host" % api))) if len(apps) > 0: - logging.basicConfig() server = wsgi.Server() for app in apps: 
server.start(*app) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index d38ba2543..e0e6af826 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -102,7 +102,6 @@ def main(): flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) - logging.basicConfig() interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') if int(os.environ.get('TESTING', '0')): FLAGS.fake_rabbit = True diff --git a/bin/nova-manage b/bin/nova-manage index 6d67252b8..878a9afaa 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -77,7 +77,6 @@ from nova import crypto from nova import db from nova import exception from nova import flags -from nova import log as logging from nova import quota from nova import rpc from nova import utils @@ -87,7 +86,6 @@ from nova.cloudpipe import pipelib from nova.db import migration -logging.basicConfig() FLAGS = flags.FLAGS flags.DECLARE('fixed_range', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') diff --git a/nova/log.py b/nova/log.py index 12b695a41..0a863c921 100644 --- a/nova/log.py +++ b/nova/log.py @@ -160,20 +160,20 @@ class NovaLogger(logging.Logger): logging.Logger.__init__(self, name, level) self.initialized = False if flags.FlagValues.initialized: - self._setup_from_flags() + self.setup_from_flags() - def _setup_from_flags(self): + def setup_from_flags(self): """Setup logger from flags""" level_name = _get_level_from_flags(self.name) self.setLevel(globals()[level_name]) self.initialized = True if not logging.root.initialized: - logging.root._setup_from_flags() + logging.root.setup_from_flags() def isEnabledFor(self, level): """Reset level after flags have been loaded""" if not self.initialized and flags.FlagValues.initialized: - self._setup_from_flags() + self.setup_from_flags() return logging.Logger.isEnabledFor(self, level) def _log(self, level, msg, args, exc_info=None, extra=None, context=None): @@ -276,18 +276,24 @@ class 
NovaRootLogger(NovaLogger): NovaLogger.__init__(self, name, level) self.addHandler(_streamlog) - def _setup_from_flags(self): + def setup_from_flags(self): """Setup logger from flags""" global _filelog if FLAGS.use_syslog: self.addHandler(_syslog) + else: + self.removeHandler(_syslog) logpath = _get_log_file_path() if logpath: if not _filelog: _filelog = WatchedFileHandler(logpath) self.addHandler(_filelog) self.removeHandler(_streamlog) - return NovaLogger._setup_from_flags(self) + else: + self.removeHandler(_filelog) + self.addHandler(_streamlog) + + return NovaLogger.setup_from_flags(self) if not isinstance(logging.root, NovaRootLogger): @@ -296,6 +302,7 @@ if not isinstance(logging.root, NovaRootLogger): logging.root = NovaRootLogger("nova") NovaLogger.root = logging.root NovaLogger.manager.root = logging.root +root=logging.root def audit(msg, *args, **kwargs): diff --git a/nova/service.py b/nova/service.py index 59648adf2..02e86f6b3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -215,7 +215,6 @@ class Service(object): def serve(*services): FLAGS(sys.argv) - logging.basicConfig() if not services: services = [Service.create()] diff --git a/nova/twistd.py b/nova/twistd.py index 60ff7879a..0e4db022c 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -258,7 +258,6 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - logging.basicConfig() logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) diff --git a/nova/wsgi.py b/nova/wsgi.py index e01cc1e1e..280baa80b 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -59,7 +59,6 @@ class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, threads=1000): - logging.basicConfig() self.pool = eventlet.GreenPool(threads) def start(self, application, port, host='0.0.0.0', backlog=128): -- cgit From 2792e42a9c7da390b3db0b59b7dff357c440d3e5 Mon Sep 17 00:00:00 2001 From: 
Vishvananda Ishaya Date: Sun, 20 Feb 2011 23:45:43 -0800 Subject: clean up location of method --- nova/log.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/nova/log.py b/nova/log.py index 0a863c921..57a550a0b 100644 --- a/nova/log.py +++ b/nova/log.py @@ -117,21 +117,6 @@ def _get_binary_name(): return os.path.basename(inspect.stack()[-1][1]) -def _get_level_from_flags(name): - # if exactly "nova", or a child logger, honor the verbose flag - if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose: - return 'DEBUG' - for pair in FLAGS.default_log_levels: - logger, _sep, level = pair.partition('=') - # NOTE(todd): if we set a.b, we want a.b.c to have the same level - # (but not a.bc, so we check the dot) - if name == logger: - return level - if name.startswith(logger) and name[len(logger)] == '.': - return level - return 'INFO' - - def _get_log_file_path(binary=None): if FLAGS.logfile: return FLAGS.logfile @@ -162,9 +147,24 @@ class NovaLogger(logging.Logger): if flags.FlagValues.initialized: self.setup_from_flags() + @staticmethod + def _get_level_from_flags(name): + # if exactly "nova", or a child logger, honor the verbose flag + if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose: + return 'DEBUG' + for pair in FLAGS.default_log_levels: + logger, _sep, level = pair.partition('=') + # NOTE(todd): if we set a.b, we want a.b.c to have the same level + # (but not a.bc, so we check the dot) + if name == logger: + return level + if name.startswith(logger) and name[len(logger)] == '.': + return level + return 'INFO' + def setup_from_flags(self): """Setup logger from flags""" - level_name = _get_level_from_flags(self.name) + level_name = self._get_level_from_flags(self.name) self.setLevel(globals()[level_name]) self.initialized = True if not logging.root.initialized: @@ -302,7 +302,7 @@ if not isinstance(logging.root, NovaRootLogger): logging.root = NovaRootLogger("nova") NovaLogger.root = 
logging.root NovaLogger.manager.root = logging.root -root=logging.root +root = logging.root def audit(msg, *args, **kwargs): -- cgit From 7eab72b30cad9708e976f60e121569972b835b61 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 00:15:49 -0800 Subject: get rid of initialized flag --- nova/flags.py | 4 ++-- nova/log.py | 16 ++++------------ nova/test.py | 3 +++ nova/tests/fake_flags.py | 2 -- nova/tests/test_log.py | 6 ++---- 5 files changed, 11 insertions(+), 20 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 2f3bdd675..72d123e21 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,7 +29,6 @@ import sys import gflags - class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. @@ -93,7 +92,8 @@ class FlagValues(gflags.FlagValues): self.__dict__['__stored_argv'] = original_argv self.__dict__['__was_already_parsed'] = True self.ClearDirty() - FlagValues.initialized = True + from nova import log as logging + logging.reset() return args def Reset(self): diff --git a/nova/log.py b/nova/log.py index 57a550a0b..8d240782d 100644 --- a/nova/log.py +++ b/nova/log.py @@ -143,9 +143,7 @@ class NovaLogger(logging.Logger): """ def __init__(self, name, level=NOTSET): logging.Logger.__init__(self, name, level) - self.initialized = False - if flags.FlagValues.initialized: - self.setup_from_flags() + self.setup_from_flags() @staticmethod def _get_level_from_flags(name): @@ -166,15 +164,6 @@ class NovaLogger(logging.Logger): """Setup logger from flags""" level_name = self._get_level_from_flags(self.name) self.setLevel(globals()[level_name]) - self.initialized = True - if not logging.root.initialized: - logging.root.setup_from_flags() - - def isEnabledFor(self, level): - """Reset level after flags have been loaded""" - if not self.initialized and flags.FlagValues.initialized: - self.setup_from_flags() - return logging.Logger.isEnabledFor(self, level) def _log(self, level, msg, args, 
exc_info=None, extra=None, context=None): """Extract context from any log call""" @@ -304,6 +293,9 @@ if not isinstance(logging.root, NovaRootLogger): NovaLogger.manager.root = logging.root root = logging.root +def reset(): + root.setup_from_flags() + def audit(msg, *args, **kwargs): """Shortcut for logging to root log with sevrity 'AUDIT'.""" diff --git a/nova/test.py b/nova/test.py index a12cf9d32..8022396cd 100644 --- a/nova/test.py +++ b/nova/test.py @@ -32,9 +32,12 @@ from nova import context from nova import db from nova import fakerabbit from nova import flags +from nova import log as logging from nova import rpc from nova.network import manager as network_manager + from nova.tests import fake_flags +logging.reset() FLAGS = flags.FLAGS diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 0a1011d86..59839b090 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -42,5 +42,3 @@ FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///nova.sqlite' FLAGS.use_ipv6 = True FLAGS.logfile = 'run_tests.err' -# NOTE(vish): pretend like we've loaded flags from command line -flags.FlagValues.initialized = True diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py index ada8d0a56..122351ff6 100644 --- a/nova/tests/test_log.py +++ b/nova/tests/test_log.py @@ -46,14 +46,12 @@ class RootLoggerTestCase(test.TestCase): def test_will_be_verbose_if_verbose_flag_set(self): self.flags(verbose=True) - self.log.initialized = False - log.audit("foo", context=_fake_context()) + log.reset() self.assertEqual(log.DEBUG, self.log.level) def test_will_not_be_verbose_if_verbose_flag_not_set(self): self.flags(verbose=False) - self.log.initialized = False - log.audit("foo", context=_fake_context()) + log.reset() self.assertEqual(log.INFO, self.log.level) -- cgit From 86b202f7397b80358346e1b2a9894af81faa4f4b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 00:17:58 -0800 Subject: fix nova-api as well --- bin/nova-api | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-api b/bin/nova-api index 8b3674880..5937f7744 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -70,7 +70,7 @@ def run_app(paste_config_file): return # NOTE(todd): redo logging config, verbose could be set in paste config - logging.root.setup_from_flags() + logging.reset() server = wsgi.Server() for app in apps: -- cgit From f9af5309cf50b3b1a4ef9799c071cbaa6b1b304f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 00:22:45 -0800 Subject: removed extra comments and initialized from flags --- nova/flags.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index 72d123e21..e2f7960ec 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -37,7 +37,6 @@ class FlagValues(gflags.FlagValues): defined after the initial parsing. """ - initialized = False def __init__(self, extra_context=None): gflags.FlagValues.__init__(self) @@ -45,8 +44,6 @@ class FlagValues(gflags.FlagValues): self.__dict__['__was_already_parsed'] = False self.__dict__['__stored_argv'] = [] self.__dict__['__extra_context'] = extra_context - # NOTE(vish): force a pseudo flag to keep track of whether - # flags have been parsed already def __call__(self, argv): # We're doing some hacky stuff here so that we don't have to copy -- cgit From f28ed7d95afd17e55e1db25a75e065f9da0f06e6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 00:24:35 -0800 Subject: add docstring to reset method --- nova/log.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/log.py b/nova/log.py index 8d240782d..94eeecce0 100644 --- a/nova/log.py +++ b/nova/log.py @@ -294,6 +294,7 @@ if not isinstance(logging.root, NovaRootLogger): root = logging.root def reset(): + """Resets logging handlers. 
Should be called if FLAGS changes.""" root.setup_from_flags() -- cgit From bfba5b2cf8ade746d74485bd76f9d60238ccb2ea Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 00:48:33 -0800 Subject: reset all loggers on flag change, not just root --- nova/log.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/log.py b/nova/log.py index 94eeecce0..2b43f7311 100644 --- a/nova/log.py +++ b/nova/log.py @@ -295,6 +295,9 @@ root = logging.root def reset(): """Resets logging handlers. Should be called if FLAGS changes.""" + for logger in logging.Logger.manager.loggerDict.itervalues(): + if isinstance(logger, NovaLogger): + logger.setup_from_flags() root.setup_from_flags() -- cgit From e773c16e1bce0e00b269394d1ed20d15884827ff Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 01:07:46 -0800 Subject: simplify logic for parsing log level flags --- nova/log.py | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/nova/log.py b/nova/log.py index 2b43f7311..0cd42d00e 100644 --- a/nova/log.py +++ b/nova/log.py @@ -145,25 +145,14 @@ class NovaLogger(logging.Logger): logging.Logger.__init__(self, name, level) self.setup_from_flags() - @staticmethod - def _get_level_from_flags(name): - # if exactly "nova", or a child logger, honor the verbose flag - if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose: - return 'DEBUG' + def setup_from_flags(self): + """Setup logger from flags""" for pair in FLAGS.default_log_levels: logger, _sep, level = pair.partition('=') # NOTE(todd): if we set a.b, we want a.b.c to have the same level # (but not a.bc, so we check the dot) - if name == logger: - return level - if name.startswith(logger) and name[len(logger)] == '.': - return level - return 'INFO' - - def setup_from_flags(self): - """Setup logger from flags""" - level_name = self._get_level_from_flags(self.name) - self.setLevel(globals()[level_name]) + if self.name == logger or 
self.name.startswith("%s." % logger): + self.setLevel(globals()[level]) def _log(self, level, msg, args, exc_info=None, extra=None, context=None): """Extract context from any log call""" @@ -281,8 +270,10 @@ class NovaRootLogger(NovaLogger): else: self.removeHandler(_filelog) self.addHandler(_streamlog) - - return NovaLogger.setup_from_flags(self) + if FLAGS.verbose: + self.setLevel(DEBUG) + else: + self.setLevel(INFO) if not isinstance(logging.root, NovaRootLogger): -- cgit From 753d3a6915ad8387ea29ad1a7fb4aed74c4b71fd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 01:26:15 -0800 Subject: move exception hook into appropriate location and remove extra stuff from module namespace --- bin/nova-manage | 1 - nova/log.py | 45 +++++++++++++++++++++++---------------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 878a9afaa..861798717 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -85,7 +85,6 @@ from nova.auth import manager from nova.cloudpipe import pipelib from nova.db import migration - FLAGS = flags.FLAGS flags.DECLARE('fixed_range', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') diff --git a/nova/log.py b/nova/log.py index 0cd42d00e..5ffb52cde 100644 --- a/nova/log.py +++ b/nova/log.py @@ -130,9 +130,6 @@ def basicConfig(): logging.basicConfig = basicConfig -_syslog = SysLogHandler(address='/dev/log') -_filelog = None -_streamlog = StreamHandler() class NovaLogger(logging.Logger): @@ -193,16 +190,6 @@ class NovaLogger(logging.Logger): self.error(message, **kwargs) -def handle_exception(type, value, tb): - if len(logging.root.handlers) == 0: - logging.root.addHandler(_streamlog) - logging.root.critical(str(value), exc_info=(type, value, tb)) - - -sys.excepthook = handle_exception -logging.setLoggerClass(NovaLogger) - - class NovaFormatter(logging.Formatter): """ A nova.context.RequestContext aware formatter configured through flags. 
@@ -251,25 +238,30 @@ _formatter = NovaFormatter() class NovaRootLogger(NovaLogger): def __init__(self, name, level=NOTSET): + self.logpath = None + self.filelog = None + self.syslog = SysLogHandler(address='/dev/log') + self.streamlog = StreamHandler() NovaLogger.__init__(self, name, level) - self.addHandler(_streamlog) def setup_from_flags(self): """Setup logger from flags""" global _filelog if FLAGS.use_syslog: - self.addHandler(_syslog) + self.addHandler(self.syslog) else: - self.removeHandler(_syslog) + self.removeHandler(self.syslog) logpath = _get_log_file_path() if logpath: - if not _filelog: - _filelog = WatchedFileHandler(logpath) - self.addHandler(_filelog) - self.removeHandler(_streamlog) + self.removeHandler(self.streamlog) + if logpath != self.logpath: + self.removeHandler(self.filelog) + self.filelog = WatchedFileHandler(logpath) + self.addHandler(self.filelog) + self.logpath = logpath else: - self.removeHandler(_filelog) - self.addHandler(_streamlog) + self.removeHandler(self.filelog) + self.addHandler(self.streamlog) if FLAGS.verbose: self.setLevel(DEBUG) else: @@ -284,6 +276,15 @@ if not isinstance(logging.root, NovaRootLogger): NovaLogger.manager.root = logging.root root = logging.root + +def handle_exception(type, value, tb): + root.critical(str(value), exc_info=(type, value, tb)) + + +sys.excepthook = handle_exception +logging.setLoggerClass(NovaLogger) + + def reset(): """Resets logging handlers. 
Should be called if FLAGS changes.""" for logger in logging.Logger.manager.loggerDict.itervalues(): -- cgit From eebb9bb14edb6fd1d218b3aa18142ee739ddd715 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Mon, 21 Feb 2011 16:16:21 +0100 Subject: introducing a new flag timeout_nbd for manually setting the time in seconds for waiting for an upcoming NBD device --- nova/virt/disk.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index c5565abfa..cb639a102 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -38,6 +38,8 @@ flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, 'minimum size in bytes of root partition') flags.DEFINE_integer('block_size', 1024 * 1024 * 256, 'block_size to use for dd') +flags.DEFINE_integer('timeout_nbd', 10, + 'time to wait for a NBD device coming up') def extend(image, size): @@ -117,7 +119,7 @@ def _link_device(image, nbd): utils.execute('sudo qemu-nbd -c %s %s' % (device, image)) # NOTE(vish): this forks into another process, so give it a chance # to set up before continuuing - for i in xrange(10): + for i in xrange(FLAGS.timeout_nbd): if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)): return device time.sleep(1) -- cgit From 8b30a903a4d2c5c6ffe44e58b8531ddc889492c0 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 21 Feb 2011 13:10:45 -0500 Subject: PEP8 errors and remove check in authors file for nova-core, since nova-core owns the translation export branch --- nova/tests/test_misc.py | 2 ++ setup.py | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index 33c1777d5..7a4d512a4 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -46,6 +46,8 @@ class ProjectTestCase(test.TestCase): missing = set() for contributor in contributors: + if contributor == 'nova-core': + pass if not contributor in authors_file: missing.add(contributor) diff 
--git a/setup.py b/setup.py index 89add02c3..4ab8f386b 100644 --- a/setup.py +++ b/setup.py @@ -26,9 +26,11 @@ from setuptools.command.sdist import sdist try: import DistUtilsExtra.auto except ImportError: - print >> sys.stderr, 'To build nova you need https://launchpad.net/python-distutils-extra' + print >> sys.stderr, 'To build nova you need '\ + 'https://launchpad.net/python-distutils-extra' sys.exit(1) -assert DistUtilsExtra.auto.__version__ >= '2.18', 'needs DistUtilsExtra.auto >= 2.18' +assert DistUtilsExtra.auto.__version__ >= '2.18',\ + 'needs DistUtilsExtra.auto >= 2.18' from nova.utils import parse_mailmap, str_dict_replace -- cgit From 5e02518d8ae5e7f57033e28ae85067d28c1ff3d8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 10:52:28 -0800 Subject: reset to notset if level isn't in flags --- nova/log.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/log.py b/nova/log.py index 5ffb52cde..61a279882 100644 --- a/nova/log.py +++ b/nova/log.py @@ -144,12 +144,14 @@ class NovaLogger(logging.Logger): def setup_from_flags(self): """Setup logger from flags""" + level = NOTSET for pair in FLAGS.default_log_levels: - logger, _sep, level = pair.partition('=') + logger, _sep, level_name = pair.partition('=') # NOTE(todd): if we set a.b, we want a.b.c to have the same level # (but not a.bc, so we check the dot) if self.name == logger or self.name.startswith("%s." % logger): - self.setLevel(globals()[level]) + level = globals()[level_name] + self.setLevel(level) def _log(self, level, msg, args, exc_info=None, extra=None, context=None): """Extract context from any log call""" -- cgit From cf006cce87ce18cdf9d7705fcc2be05b912d4c3f Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Mon, 21 Feb 2011 13:55:25 -0500 Subject: Duh, continue skips iteration, not pass. 
#iamanidiot --- nova/tests/test_misc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index 7a4d512a4..e6da6112a 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -47,7 +47,7 @@ class ProjectTestCase(test.TestCase): missing = set() for contributor in contributors: if contributor == 'nova-core': - pass + continue if not contributor in authors_file: missing.add(contributor) -- cgit From 8388144744849265b46d26735da01a11e35990b0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 11:07:50 -0800 Subject: cleanup from review --- nova/log.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/nova/log.py b/nova/log.py index 61a279882..3a48c97ff 100644 --- a/nova/log.py +++ b/nova/log.py @@ -125,13 +125,6 @@ def _get_log_file_path(binary=None): return '%s.log' % (os.path.join(FLAGS.logdir, binary),) -def basicConfig(): - pass - - -logging.basicConfig = basicConfig - - class NovaLogger(logging.Logger): """ NovaLogger manages request context and formatting. @@ -176,7 +169,7 @@ class NovaLogger(logging.Logger): """Logging.exception doesn't handle kwargs, so breaks context""" if not kwargs.get('exc_info'): kwargs['exc_info'] = 1 - return self.error(msg, *args, **kwargs) + self.error(msg, *args, **kwargs) # NOTE(todd): does this really go here, or in _log ? 
extra = kwargs.get('extra') if not extra: @@ -271,11 +264,16 @@ class NovaRootLogger(NovaLogger): if not isinstance(logging.root, NovaRootLogger): + logging._acquireLock() for handler in logging.root.handlers: logging.root.removeHandler(handler) logging.root = NovaRootLogger("nova") + for logger in NovaLogger.manager.loggerDict.itervalues(): + logger.root = logging.root NovaLogger.root = logging.root NovaLogger.manager.root = logging.root + NovaLogger.manager.loggerDict["nova"] = logging.root + logging._releaseLock() root = logging.root @@ -289,14 +287,11 @@ logging.setLoggerClass(NovaLogger) def reset(): """Resets logging handlers. Should be called if FLAGS changes.""" - for logger in logging.Logger.manager.loggerDict.itervalues(): + for logger in NovaLogger.manager.loggerDict.itervalues(): if isinstance(logger, NovaLogger): logger.setup_from_flags() - root.setup_from_flags() def audit(msg, *args, **kwargs): """Shortcut for logging to root log with sevrity 'AUDIT'.""" - if len(logging.root.handlers) == 0: - basicConfig() logging.root.log(AUDIT, msg, *args, **kwargs) -- cgit From c7d83e26f7d6388857b4db4538602395b688aa7a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 11:42:46 -0800 Subject: use tests.sqlite so it doesn't conflict with running db --- nova/tests/fake_flags.py | 4 ++-- run_tests.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 59839b090..575fefff6 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -39,6 +39,6 @@ FLAGS.num_shelves = 2 FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True -FLAGS.sql_connection = 'sqlite:///nova.sqlite' +FLAGS.sql_connection = 'sqlite:///tests.sqlite' FLAGS.use_ipv6 = True -FLAGS.logfile = 'run_tests.err' +FLAGS.logfile = 'tests.log' diff --git a/run_tests.py b/run_tests.py index bf12c62c6..274dc4a97 100644 --- a/run_tests.py +++ b/run_tests.py @@ -58,8 +58,8 @@ class 
NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': - if os.path.exists("nova.sqlite"): - os.unlink("nova.sqlite") + if os.path.exists("tests.sqlite"): + os.unlink("tests.sqlite") c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, -- cgit From 02e196192ea1f8be22c31828266b177d14d123cd Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Mon, 21 Feb 2011 12:41:15 -0800 Subject: make sure that ec2 response times are xs:dateTime parsable --- nova/api/ec2/apirequest.py | 8 +++++++- nova/tests/test_api.py | 23 +++++++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 00b527d62..2b1acba5a 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -46,6 +46,11 @@ def _underscore_to_xmlcase(str): return res[:1].lower() + res[1:] +def _database_to_isoformat(datetimeobj): + """Return a xs:dateTime parsable string from datatime""" + return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ") + + def _try_convert(value): """Return a non-string if possible""" if value == 'None': @@ -173,7 +178,8 @@ class APIRequest(object): elif isinstance(data, bool): data_el.appendChild(xml.createTextNode(str(data).lower())) elif isinstance(data, datetime.datetime): - data_el.appendChild(xml.createTextNode(data.isoformat())) + data_el.appendChild( + xml.createTextNode(_database_to_isoformat(data))) elif data != None: data_el.appendChild(xml.createTextNode(str(data))) diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index fa27825cd..d5c54a1c3 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -20,6 +20,7 @@ import boto from boto.ec2 import regioninfo +import datetime import httplib import random import StringIO @@ -127,6 +128,28 @@ class ApiEc2TestCase(test.TestCase): self.ec2.new_http_connection(host, is_secure).AndReturn(self.http) return self.http + def test_return_valid_isoformat(self): + """ + Ensure that the ec2 api returns datetime in 
xs:dateTime + (which apparently isn't datetime.isoformat()) + NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297 + """ + conv = apirequest._database_to_isoformat + # sqlite database representation with microseconds + time_to_convert = datetime.datetime.strptime( + "2011-02-21 20:14:10.634276", + "%Y-%m-%d %H:%M:%S.%f") + self.assertEqual( + conv(time_to_convert), + '2011-02-21T20:14:10Z') + # mysqlite database representation + time_to_convert = datetime.datetime.strptime( + "2011-02-21 19:56:18", + "%Y-%m-%d %H:%M:%S") + self.assertEqual( + conv(time_to_convert), + '2011-02-21T19:56:18Z') + def test_xmlns_version_matches_request_version(self): self.expect_http(api_version='2010-10-30') self.mox.ReplayAll() -- cgit From 29644fe5a9cf47ae33af31b848c0edc4567f3c09 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 13:46:41 -0800 Subject: switch to explicit call to logging.setup() --- bin/nova-ajax-console-proxy | 2 +- bin/nova-api | 2 +- bin/nova-combined | 1 + bin/nova-compute | 5 ++++ bin/nova-console | 4 ++++ bin/nova-dhcpbridge | 3 ++- bin/nova-direct-api | 2 ++ bin/nova-import-canonical-imagestore | 2 ++ bin/nova-instancemonitor | 3 --- bin/nova-manage | 2 ++ bin/nova-network | 4 ++++ bin/nova-scheduler | 4 ++++ bin/nova-volume | 4 ++++ nova/log.py | 44 ++++++++++++++++++++---------------- nova/service.py | 2 -- nova/twistd.py | 1 + nova/utils.py | 2 +- run_tests.py | 2 ++ 18 files changed, 61 insertions(+), 28 deletions(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 2bc407658..392b328b1 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -25,7 +25,6 @@ from eventlet.green import urllib2 import exceptions import gettext -import logging import os import sys import time @@ -130,6 +129,7 @@ class AjaxConsoleProxy(object): if __name__ == '__main__': utils.default_flagfile() FLAGS(sys.argv) + logging.setup() server = wsgi.Server() acp = AjaxConsoleProxy() acp.register_listeners() 
diff --git a/bin/nova-api b/bin/nova-api index 5937f7744..61a4c7402 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -40,7 +40,6 @@ from nova import version from nova import wsgi LOG = logging.getLogger('nova.api') -LOG.setLevel(logging.DEBUG) FLAGS = flags.FLAGS @@ -80,6 +79,7 @@ def run_app(paste_config_file): if __name__ == '__main__': FLAGS(sys.argv) + logging.setup() LOG.audit(_("Starting nova-api node (version %s)"), version.version_string_with_vcs()) conf = wsgi.paste_config_file('nova-api.conf') diff --git a/bin/nova-combined b/bin/nova-combined index 5911d9016..6ae8400d1 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -49,6 +49,7 @@ FLAGS = flags.FLAGS if __name__ == '__main__': utils.default_flagfile() FLAGS(sys.argv) + logging.setup() compute = service.Service.create(binary='nova-compute') network = service.Service.create(binary='nova-network') diff --git a/bin/nova-compute b/bin/nova-compute index d2d352da2..e412598c2 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -36,10 +36,15 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import flags +from nova import log as logging from nova import service from nova import utils if __name__ == '__main__': utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + service.serve() service.wait() diff --git a/bin/nova-console b/bin/nova-console index 802cc80b6..40608b995 100755 --- a/bin/nova-console +++ b/bin/nova-console @@ -35,10 +35,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import flags +from nova import log as logging from nova import service from nova import utils if __name__ == '__main__': utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() service.serve() service.wait() diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index e0e6af826..eda2dc072 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ 
-102,6 +102,7 @@ def main(): flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) + logging.setup() interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') if int(os.environ.get('TESTING', '0')): FLAGS.fake_rabbit = True @@ -112,7 +113,7 @@ def main(): FLAGS.num_networks = 5 path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', - 'nova.sqlite')) + 'tests.sqlite')) FLAGS.sql_connection = 'sqlite:///%s' % path action = argv[1] if action in ['add', 'del', 'old']: diff --git a/bin/nova-direct-api b/bin/nova-direct-api index 173b39bdb..6c63bd26b 100755 --- a/bin/nova-direct-api +++ b/bin/nova-direct-api @@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) from nova import flags +from nova import log as logging from nova import utils from nova import wsgi from nova.api import direct @@ -48,6 +49,7 @@ flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host') if __name__ == '__main__': utils.default_flagfile() FLAGS(sys.argv) + logging.setup() direct.register_service('compute', compute_api.API()) direct.register_service('reflect', direct.Reflection()) diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore index 036b41e48..404ae37f4 100755 --- a/bin/nova-import-canonical-imagestore +++ b/bin/nova-import-canonical-imagestore @@ -41,6 +41,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) from nova import flags +from nova import log as logging from nova import utils from nova.objectstore import image @@ -92,6 +93,7 @@ def main(): """Main entry point.""" utils.default_flagfile() argv = FLAGS(sys.argv) + logging.setup() images = get_images() if len(argv) == 2: diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 7dca02014..24cc9fd23 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ 
-41,9 +41,6 @@ from nova import utils from nova import twistd from nova.compute import monitor -# TODO(todd): shouldn't this be done with flags? And what about verbose? -logging.getLogger('boto').setLevel(logging.WARN) - LOG = logging.getLogger('nova.instancemonitor') diff --git a/bin/nova-manage b/bin/nova-manage index 861798717..5189de0e1 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -77,6 +77,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import rpc from nova import utils @@ -707,6 +708,7 @@ def main(): """Parse options and call the appropriate class/method.""" utils.default_flagfile() argv = FLAGS(sys.argv) + logging.setup() script_name = argv.pop(0) if len(argv) < 1: diff --git a/bin/nova-network b/bin/nova-network index 0143846a7..101761ef7 100755 --- a/bin/nova-network +++ b/bin/nova-network @@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import flags +from nova import log as logging from nova import service from nova import utils if __name__ == '__main__': utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() service.serve() service.wait() diff --git a/bin/nova-scheduler b/bin/nova-scheduler index f4c0eaed6..0c205a80f 100755 --- a/bin/nova-scheduler +++ b/bin/nova-scheduler @@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import flags +from nova import log as logging from nova import service from nova import utils if __name__ == '__main__': utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() service.serve() service.wait() diff --git a/bin/nova-volume b/bin/nova-volume index ad3ddc405..8dcdbc500 100755 --- a/bin/nova-volume +++ b/bin/nova-volume @@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', 
'__init__.py')): gettext.install('nova', unicode=1) +from nova import flags +from nova import log as logging from nova import service from nova import utils if __name__ == '__main__': utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() service.serve() service.wait() diff --git a/nova/log.py b/nova/log.py index 3a48c97ff..10c14d74b 100644 --- a/nova/log.py +++ b/nova/log.py @@ -65,6 +65,7 @@ flags.DEFINE_string('logging_exception_prefix', flags.DEFINE_list('default_log_levels', ['amqplib=WARN', 'sqlalchemy=WARN', + 'boto=WARN', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') @@ -263,26 +264,8 @@ class NovaRootLogger(NovaLogger): self.setLevel(INFO) -if not isinstance(logging.root, NovaRootLogger): - logging._acquireLock() - for handler in logging.root.handlers: - logging.root.removeHandler(handler) - logging.root = NovaRootLogger("nova") - for logger in NovaLogger.manager.loggerDict.itervalues(): - logger.root = logging.root - NovaLogger.root = logging.root - NovaLogger.manager.root = logging.root - NovaLogger.manager.loggerDict["nova"] = logging.root - logging._releaseLock() -root = logging.root - - def handle_exception(type, value, tb): - root.critical(str(value), exc_info=(type, value, tb)) - - -sys.excepthook = handle_exception -logging.setLoggerClass(NovaLogger) + logging.root.critical(str(value), exc_info=(type, value, tb)) def reset(): @@ -292,6 +275,29 @@ def reset(): logger.setup_from_flags() +def setup(): + """Setup nova logging.""" + if not isinstance(logging.root, NovaRootLogger): + logging._acquireLock() + for handler in logging.root.handlers: + logging.root.removeHandler(handler) + logging.root = NovaRootLogger("nova") + NovaLogger.root = logging.root + NovaLogger.manager.root = logging.root + for logger in NovaLogger.manager.loggerDict.itervalues(): + logger.root = logging.root + if isinstance(logger, logging.Logger): + NovaLogger.manager._fixupParents(logger) + NovaLogger.manager.loggerDict["nova"] = logging.root + 
logging._releaseLock() + sys.excepthook = handle_exception + reset() + + +root = logging.root +logging.setLoggerClass(NovaLogger) + + def audit(msg, *args, **kwargs): """Shortcut for logging to root log with sevrity 'AUDIT'.""" logging.root.log(AUDIT, msg, *args, **kwargs) diff --git a/nova/service.py b/nova/service.py index 02e86f6b3..ddb4d7791 100644 --- a/nova/service.py +++ b/nova/service.py @@ -214,8 +214,6 @@ class Service(object): def serve(*services): - FLAGS(sys.argv) - if not services: services = [Service.create()] diff --git a/nova/twistd.py b/nova/twistd.py index 0e4db022c..c07ed991f 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -148,6 +148,7 @@ def WrapTwistedOptions(wrapped): options.insert(0, '') args = FLAGS(options) + logging.setup() argv = args[1:] # ignore subcommands diff --git a/nova/utils.py b/nova/utils.py index c2fd5f2ee..2a3acf042 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -55,7 +55,7 @@ def import_class(import_str): __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError), exc: - LOG.info(_('Inner Exception: %s'), exc) + LOG.debug(_('Inner Exception: %s'), exc) raise exception.NotFound(_('Class %s cannot be found') % class_str) diff --git a/run_tests.py b/run_tests.py index 274dc4a97..82345433a 100644 --- a/run_tests.py +++ b/run_tests.py @@ -26,6 +26,7 @@ from nose import config from nose import result from nose import core +from nova import log as logging class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): @@ -60,6 +61,7 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': if os.path.exists("tests.sqlite"): os.unlink("tests.sqlite") + logging.setup() c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, -- cgit From dbb071c8424871b6985c6b470d9eff522cdda660 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 13:59:46 -0800 Subject: fix pep8 and remove extra reference to reset --- 
bin/nova-compute | 1 - nova/flags.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/bin/nova-compute b/bin/nova-compute index e412598c2..95fa393b1 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -45,6 +45,5 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() service.wait() diff --git a/nova/flags.py b/nova/flags.py index e2f7960ec..f64a62da9 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,6 +29,7 @@ import sys import gflags + class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. @@ -89,8 +90,6 @@ class FlagValues(gflags.FlagValues): self.__dict__['__stored_argv'] = original_argv self.__dict__['__was_already_parsed'] = True self.ClearDirty() - from nova import log as logging - logging.reset() return args def Reset(self): -- cgit From e5d030863eae7f997867350916adf0c721625d26 Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 21 Feb 2011 14:55:06 -0800 Subject: add a test for rpc consumer isolation --- nova/tests/test_test.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 nova/tests/test_test.py diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py new file mode 100644 index 000000000..c1d96a148 --- /dev/null +++ b/nova/tests/test_test.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for the testing base code.""" + +from nova import rpc +from nova import test + + +class IsolationTestCase(test.TestCase): + """Ensure that things are cleaned up after failed tests. + + These tests don't really do much here, but if isolation fails a bunch + of other tests should fail. + + """ + def test_rpc_consumer_isolation(self): + connection = rpc.Connection.instance(new=True) + consumer = rpc.TopicConsumer(connection, topic='compute') + consumer.register_callback( + lambda x, y: self.fail('I should never be called')) + consumer.attach_to_eventlet() -- cgit From ab73d72d33369d47012437c022a0679fa4ca3b38 Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 21 Feb 2011 14:55:06 -0800 Subject: add a start_service method to our test baseclass --- nova/service.py | 7 +++ nova/test.py | 53 +++++++++++++++++------ nova/tests/test_scheduler.py | 100 +++++++------------------------------------ nova/tests/test_test.py | 3 ++ 4 files changed, 67 insertions(+), 96 deletions(-) diff --git a/nova/service.py b/nova/service.py index 59648adf2..45286cf94 100644 --- a/nova/service.py +++ b/nova/service.py @@ -181,6 +181,13 @@ class Service(object): pass self.timers = [] + def wait(self): + for x in self.timers: + try: + x.wait() + except Exception: + pass + def periodic_tasks(self): """Tasks to be run at a periodic interval""" self.manager.periodic_tasks(context.get_admin_context()) diff --git a/nova/test.py b/nova/test.py index a12cf9d32..9bff401a1 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,6 +23,7 @@ and some black magic for inline callbacks. 
""" import datetime +import uuid import unittest import mox @@ -33,6 +34,7 @@ from nova import db from nova import fakerabbit from nova import flags from nova import rpc +from nova import service from nova.network import manager as network_manager from nova.tests import fake_flags @@ -80,6 +82,7 @@ class TestCase(unittest.TestCase): self.stubs = stubout.StubOutForTesting() self.flag_overrides = {} self.injected = [] + self._services = [] self._monkey_patch_attach() self._original_flags = FLAGS.FlagValuesDict() @@ -91,25 +94,42 @@ class TestCase(unittest.TestCase): self.stubs.UnsetAll() self.stubs.SmartUnsetAll() self.mox.VerifyAll() - # NOTE(vish): Clean up any ips associated during the test. - ctxt = context.get_admin_context() - db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, - self.start) - db.network_disassociate_all(ctxt) + super(TestCase, self).tearDown() + finally: + try: + # Clean up any ips associated during the test. + ctxt = context.get_admin_context() + db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, + self.start) + db.network_disassociate_all(ctxt) + + db.security_group_destroy_all(ctxt) + except Exception: + pass + + # Clean out fake_rabbit's queue if we used it + if FLAGS.fake_rabbit: + fakerabbit.reset_all() + + # Reset any overriden flags + self.reset_flags() + + # Reset our monkey-patches rpc.Consumer.attach_to_eventlet = self.originalAttach + + # Stop any timers for x in self.injected: try: x.stop() except AssertionError: pass - if FLAGS.fake_rabbit: - fakerabbit.reset_all() - - db.security_group_destroy_all(ctxt) - super(TestCase, self).tearDown() - finally: - self.reset_flags() + # Kill any services + for x in self._services: + try: + x.kill() + except Exception: + pass def flags(self, **kw): """Override flag variables for a test""" @@ -127,6 +147,15 @@ class TestCase(unittest.TestCase): for k, v in self._original_flags.iteritems(): setattr(FLAGS, k, v) + def start_service(self, name=None, host=None, **kwargs): + host = host 
and host or uuid.uuid4().hex + kwargs.setdefault('host', host) + kwargs.setdefault('binary', 'nova-%s' % name) + svc = service.Service.create(**kwargs) + svc.start() + self._services.append(svc) + return svc + def _monkey_patch_attach(self): self.originalAttach = rpc.Consumer.attach_to_eventlet diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 9d458244b..250170072 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -176,18 +176,8 @@ class SimpleDriverTestCase(test.TestCase): def test_doesnt_report_disabled_hosts_as_up(self): """Ensures driver doesn't find hosts before they are enabled""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute') db.service_update(self.context, s1['id'], {'disabled': True}) @@ -199,18 +189,8 @@ class SimpleDriverTestCase(test.TestCase): def test_reports_enabled_hosts_as_up(self): """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') hosts = self.scheduler.driver.hosts_up(self.context, 'compute') self.assertEqual(2, len(hosts)) 
compute1.kill() @@ -218,16 +198,8 @@ class SimpleDriverTestCase(test.TestCase): def test_least_busy_host_gets_instance(self): """Ensures the host with less cores gets the next one""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance() @@ -241,16 +213,8 @@ class SimpleDriverTestCase(test.TestCase): def test_specific_host_gets_instance(self): """Ensures if you set availability_zone it launches on that zone""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') instance_id1 = self._create_instance() compute1.run_instance(self.context, instance_id1) instance_id2 = self._create_instance(availability_zone='nova:host1') @@ -263,11 +227,7 @@ class SimpleDriverTestCase(test.TestCase): compute2.kill() def test_wont_sechedule_if_specified_host_is_down(self): - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() + compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') now = datetime.datetime.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) @@ -282,11 +242,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() def test_will_schedule_on_disabled_host_if_specified(self): - compute1 = service.Service('host1', - 'nova-compute', - 
'compute', - FLAGS.compute_manager) - compute1.start() + compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') db.service_update(self.context, s1['id'], {'disabled': True}) instance_id2 = self._create_instance(availability_zone='nova:host1') @@ -298,16 +254,8 @@ class SimpleDriverTestCase(test.TestCase): def test_too_many_cores(self): """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() + compute1 = self.start_service('compute', host='host1') + compute2 = self.start_service('compute', host='host2') instance_ids1 = [] instance_ids2 = [] for index in xrange(FLAGS.max_cores): @@ -331,16 +279,8 @@ class SimpleDriverTestCase(test.TestCase): def test_least_busy_host_gets_volume(self): """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') volume_id1 = self._create_volume() volume1.create_volume(self.context, volume_id1) volume_id2 = self._create_volume() @@ -354,16 +294,8 @@ class SimpleDriverTestCase(test.TestCase): def test_too_many_gigabytes(self): """Ensures we don't go over max gigabytes""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() + volume1 = self.start_service('volume', host='host1') + volume2 = self.start_service('volume', host='host2') volume_ids1 = [] volume_ids2 = [] for index in 
xrange(FLAGS.max_gigabytes): diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index c1d96a148..e237674e6 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -29,6 +29,9 @@ class IsolationTestCase(test.TestCase): of other tests should fail. """ + def test_service_isolation(self): + self.start_service('compute') + def test_rpc_consumer_isolation(self): connection = rpc.Connection.instance(new=True) consumer = rpc.TopicConsumer(connection, topic='compute') -- cgit From 9003241814ab67817ea910943e932d7b2e542eb6 Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 21 Feb 2011 14:55:06 -0800 Subject: move test_cloud to use start_service, too --- nova/tests/test_cloud.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 445cc6e8b..a174ea75d 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -65,10 +65,8 @@ class CloudTestCase(test.TestCase): self.cloud = cloud.CloudController() # set up services - self.compute = service.Service.create(binary='nova-compute') - self.compute.start() - self.network = service.Service.create(binary='nova-network') - self.network.start() + self.compute = self.start_service('compute') + self.network = self.start_service('network') self.manager = manager.AuthManager() self.user = self.manager.create_user('admin', 'admin', 'admin', True) -- cgit From 83e4dcb7184169d4d35769c2d56b21e66c908e75 Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 21 Feb 2011 14:55:06 -0800 Subject: remove keyword argument, per review --- nova/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/test.py b/nova/test.py index 9bff401a1..4602f0313 100644 --- a/nova/test.py +++ b/nova/test.py @@ -147,7 +147,7 @@ class TestCase(unittest.TestCase): for k, v in self._original_flags.iteritems(): setattr(FLAGS, k, v) - def start_service(self, name=None, host=None, **kwargs): + def start_service(self, name, host=None, **kwargs): 
host = host and host or uuid.uuid4().hex kwargs.setdefault('host', host) kwargs.setdefault('binary', 'nova-%s' % name) -- cgit From 4b2a45aa5dc91b24aea53f748906d8a69e40f7c8 Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 21 Feb 2011 15:42:16 -0800 Subject: modify tests to use specific hosts rather than default --- nova/tests/test_cloud.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a174ea75d..1824d24bc 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -100,7 +100,7 @@ class CloudTestCase(test.TestCase): address = "10.10.10.10" db.floating_ip_create(self.context, {'address': address, - 'host': FLAGS.host}) + 'host': self.network.host}) self.cloud.allocate_address(self.context) self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, @@ -113,9 +113,9 @@ class CloudTestCase(test.TestCase): address = "10.10.10.10" db.floating_ip_create(self.context, {'address': address, - 'host': FLAGS.host}) + 'host': self.network.host}) self.cloud.allocate_address(self.context) - inst = db.instance_create(self.context, {'host': FLAGS.host}) + inst = db.instance_create(self.context, {'host': self.compute.host}) fixed = self.network.allocate_fixed_ip(self.context, inst['id']) ec2_id = cloud.id_to_ec2_id(inst['id']) self.cloud.associate_address(self.context, -- cgit From f797d6c6464f8ee2816d56ee771ad718418def64 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 21 Feb 2011 15:52:41 -0800 Subject: Renamed db_update to model_update, and lots more documentation --- nova/volume/driver.py | 26 ++++++++++++++++++++++---- nova/volume/manager.py | 12 ++++++------ nova/volume/san.py | 18 +++++++++--------- 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index f172e2fdc..687bc99d0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -91,7 +91,8 @@ class VolumeDriver(object): % 
FLAGS.volume_group) def create_volume(self, volume): - """Creates a logical volume.""" + """Creates a logical volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" if int(volume['size']) == 0: sizestr = '100M' else: @@ -126,7 +127,8 @@ class VolumeDriver(object): raise NotImplementedError() def create_export(self, context, volume): - """Exports the volume.""" + """Exports the volume. Can optionally return a Dictionary of changes + to the volume object to be persisted.""" raise NotImplementedError() def remove_export(self, context, volume): @@ -225,7 +227,14 @@ class FakeAOEDriver(AOEDriver): class ISCSIDriver(VolumeDriver): - """Executes commands relating to ISCSI volumes.""" + """Executes commands relating to ISCSI volumes. We make use of model + provider properties as follows: + provider_location - if present, contains the iSCSI target information + in the same format as an ietadm discovery + i.e. ', ' + provider_auth - if present, contains a space-separated triple: + ' '. CHAP is the only + auth_method in use at the moment.""" def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" @@ -313,7 +322,16 @@ class ISCSIDriver(VolumeDriver): def _get_iscsi_properties(self, volume): """Gets iscsi configuration, ideally from saved information in the - volume entity, but falling back to discovery if need be.""" + volume entity, but falling back to discovery if need be. The + properties are: + target_discovered - boolean indicating whether discovery was used, + target_iqn - the IQN of the iSCSI target, + target_portal - the portal of the iSCSI target, + and auth_method, auth_username and auth_password + - the authentication details. 
Right now, either + auth_method is not present meaning no authentication, or + auth_method == 'CHAP' meaning use CHAP with the specified + credentials.""" properties = {} diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7193ece14..3e8bc16b3 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -107,14 +107,14 @@ class VolumeManager(manager.Manager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - db_update = self.driver.create_volume(volume_ref) - if db_update: - self.db.volume_update(context, volume_ref['id'], db_update) + model_update = self.driver.create_volume(volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - db_update = self.driver.create_export(context, volume_ref) - if db_update: - self.db.volume_update(context, volume_ref['id'], db_update) + model_update = self.driver.create_export(context, volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) diff --git a/nova/volume/san.py b/nova/volume/san.py index 911ad096f..09192bc9f 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -485,12 +485,12 @@ class HpSanISCSIDriver(SanISCSIDriver): cluster_vip = self._cliq_get_cluster_vip(cluster_name) iscsi_portal = cluster_vip + ":3260," + cluster_interface - db_update = {} - db_update['provider_location'] = ("%s %s" % - (iscsi_portal, - iscsi_iqn)) + model_update = {} + model_update['provider_location'] = ("%s %s" % + (iscsi_portal, + iscsi_iqn)) - return db_update + return model_update def delete_volume(self, volume): """Deletes a volume.""" @@ -517,7 +517,7 @@ class HpSanISCSIDriver(SanISCSIDriver): is_shared = 'permission.authGroup' in volume_info - db_update = {} + model_update = {} should_export = False @@ -551,10 +551,10 
@@ class HpSanISCSIDriver(SanISCSIDriver): self._cliq_run_xml("assignVolumeChap", cliq_args) - db_update['provider_auth'] = ("CHAP %s %s" % - (chap_username, chap_password)) + model_update['provider_auth'] = ("CHAP %s %s" % + (chap_username, chap_password)) - return db_update + return model_update def remove_export(self, context, volume): """Removes an export for a logical volume.""" -- cgit From 90007cd9085909a1e1ac59732a0b371dd79c2557 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 15:55:50 -0800 Subject: pretty colors for logs and a few optimizations --- bin/nova-dhcpbridge | 2 + run_tests.py | 202 +++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 202 insertions(+), 2 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index eda2dc072..04a1771f0 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -113,6 +113,8 @@ def main(): FLAGS.num_networks = 5 path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', + 'nova', + 'tests', 'tests.sqlite')) FLAGS.sql_connection = 'sqlite:///%s' % path action = argv[1] diff --git a/run_tests.py b/run_tests.py index 82345433a..43508394f 100644 --- a/run_tests.py +++ b/run_tests.py @@ -17,6 +17,29 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Colorizer Code is borrowed from Twisted: +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + import gettext import os import unittest @@ -28,14 +51,186 @@ from nose import core from nova import log as logging +class _AnsiColorizer(object): + """ + A colorizer is an object that loosely wraps around a stream, allowing + callers to write text to the stream in a particular color. + + Colorizer classes must implement C{supported()} and C{write(text, color)}. + """ + _colors = dict(black=30, red=31, green=32, yellow=33, + blue=34, magenta=35, cyan=36, white=37) + + def __init__(self, stream): + self.stream = stream + + def supported(cls, stream=sys.stdout): + """ + A class method that returns True if the current platform supports + coloring terminal output using this method. Returns False otherwise. 
+ """ + if not stream.isatty(): + return False # auto color only on TTYs + try: + import curses + except ImportError: + return False + else: + try: + try: + return curses.tigetnum("colors") > 2 + except curses.error: + curses.setupterm(fd=stream.fileno()) + return curses.tigetnum("colors") > 2 + except: + raise + # guess false in case of error + return False + supported = classmethod(supported) + + def write(self, text, color): + """ + Write the given text to the stream in the given color. + + @param text: Text to be written to the stream. + + @param color: A string label for a color. e.g. 'red', 'white'. + """ + color = self._colors[color] + self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) + + +class _Win32Colorizer(object): + """ + See _AnsiColorizer docstring. + """ + def __init__(self, stream): + from win32console import GetStdHandle, STD_ERROR_HANDLE, \ + FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \ + FOREGROUND_INTENSITY + red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN, + FOREGROUND_BLUE, FOREGROUND_INTENSITY) + self.stream = stream + self.screenBuffer = GetStdHandle(STD_ERROR_HANDLE) + self._colors = { + 'normal': red | green | blue, + 'red': red | bold, + 'green': green | bold, + 'blue': blue | bold, + 'yellow': red | green | bold, + 'magenta': red | blue | bold, + 'cyan': green | blue | bold, + 'white': red | green | blue | bold + } + + def supported(cls, stream=sys.stdout): + try: + import win32console + screenBuffer = win32console.GetStdHandle( + win32console.STD_ERROR_HANDLE) + except ImportError: + return False + import pywintypes + try: + screenBuffer.SetConsoleTextAttribute( + win32console.FOREGROUND_RED | + win32console.FOREGROUND_GREEN | + win32console.FOREGROUND_BLUE) + except pywintypes.error: + return False + else: + return True + supported = classmethod(supported) + + def write(self, text, color): + color = self._colors[color] + self.screenBuffer.SetConsoleTextAttribute(color) + self.stream.write(text) + 
self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) + + +class _NullColorizer(object): + """ + See _AnsiColorizer docstring. + """ + def __init__(self, stream): + self.stream = stream + + def supported(cls, stream=sys.stdout): + return True + supported = classmethod(supported) + + def write(self, text, color): + self.stream.write(text) + class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): result.TextTestResult.__init__(self, *args, **kw) self._last_case = None + self.colorizer = None + for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: + # NOTE(vish): nose does funky stuff with stdout, so use stderr + # to setup the colorizer + if colorizer.supported(sys.stderr): + self.colorizer = colorizer(self.stream) + break def getDescription(self, test): return str(test) + def addSuccess(self, test): + if self.showAll: + self.colorizer.write("OK", 'green') + self.stream.writeln() + elif self.dots: + self.stream.write('.') + self.stream.flush() + + def addFailure(self, test): + if self.showAll: + self.colorizer.write("FAIL", 'red') + self.stream.writeln() + elif self.dots: + self.stream.write('F') + self.stream.flush() + + def addError(self, test, err): + """Overrides normal addError to add support for + errorClasses. If the exception is a registered class, the + error will be added to the list for that class, not errors. 
+ """ + stream = getattr(self, 'stream', None) + ec, ev, tb = err + try: + exc_info = self._exc_info_to_string(err, test) + except TypeError: + # 2.3 compat + exc_info = self._exc_info_to_string(err) + for cls, (storage, label, isfail) in self.errorClasses.items(): + if result.isclass(ec) and issubclass(ec, cls): + if isfail: + test.passed = False + storage.append((test, exc_info)) + # Might get patched into a streamless result + if stream is not None: + if self.showAll: + message = [label] + detail = result._exception_detail(err[1]) + if detail: + message.append(detail) + stream.writeln(": ".join(message)) + elif self.dots: + stream.write(label[:1]) + return + self.errors.append((test, exc_info)) + test.passed = False + if stream is not None: + if self.showAll: + self.colorizer.write("ERROR", 'red') + self.stream.writeln() + elif self.dots: + stream.write('E') + def startTest(self, test): unittest.TestResult.startTest(self, test) current_case = test.test.__class__.__name__ @@ -59,12 +254,15 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': - if os.path.exists("tests.sqlite"): - os.unlink("tests.sqlite") logging.setup() + testdir = os.path.abspath(os.path.join("nova","tests")) + testdb = os.path.join(testdir, "tests.sqlite") + if os.path.exists(testdb): + os.unlink(testdb) c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, + workingDir=testdir, plugins=core.DefaultPluginManager()) runner = NovaTestRunner(stream=c.stream, -- cgit From 305ef6bf5f8f8926fdaa8db5f75a0680fbd8a2be Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 21 Feb 2011 16:02:38 -0800 Subject: Fixed my confusion in documenting the syntax of iSCSI discovery --- nova/volume/driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 687bc99d0..22c2c2fc3 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -231,7 +231,7 @@ class ISCSIDriver(VolumeDriver): provider 
properties as follows: provider_location - if present, contains the iSCSI target information in the same format as an ietadm discovery - i.e. ', ' + i.e. ', ' provider_auth - if present, contains a space-separated triple: ' '. CHAP is the only auth_method in use at the moment.""" -- cgit From 71f7119910f16cb99c10f43a07ccb1e7c0ca473f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 16:05:28 -0800 Subject: remove changes to test db --- bin/nova-dhcpbridge | 2 +- nova/tests/fake_flags.py | 2 +- run_tests.py | 2 -- run_tests.sh | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index eda2dc072..35b837ca9 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -113,7 +113,7 @@ def main(): FLAGS.num_networks = 5 path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', - 'tests.sqlite')) + 'nova.sqlite')) FLAGS.sql_connection = 'sqlite:///%s' % path action = argv[1] if action in ['add', 'del', 'old']: diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 575fefff6..cfa65c137 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -39,6 +39,6 @@ FLAGS.num_shelves = 2 FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True -FLAGS.sql_connection = 'sqlite:///tests.sqlite' +FLAGS.sql_connection = 'sqlite:///nova.sqlite' FLAGS.use_ipv6 = True FLAGS.logfile = 'tests.log' diff --git a/run_tests.py b/run_tests.py index 82345433a..cb957da42 100644 --- a/run_tests.py +++ b/run_tests.py @@ -59,8 +59,6 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': - if os.path.exists("tests.sqlite"): - os.unlink("tests.sqlite") logging.setup() c = config.Config(stream=sys.stdout, env=os.environ, diff --git a/run_tests.sh b/run_tests.sh index 4e21fe945..70212cc6a 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -40,7 +40,7 @@ done function run_tests { # Just run the test suites in current environment ${wrapper} rm -f 
nova.sqlite - ${wrapper} $NOSETESTS 2> run_tests.err.log + ${wrapper} $NOSETESTS } NOSETESTS="python run_tests.py $noseargs" -- cgit From 11c57867ec18bd61dcc6bde0dc4b459318d54e70 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 16:19:48 -0800 Subject: fixed newline and moved import fake_flags into run_tests where it makes more sense --- nova/test.py | 3 --- run_tests.py | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/test.py b/nova/test.py index 8022396cd..bff43b6c7 100644 --- a/nova/test.py +++ b/nova/test.py @@ -36,9 +36,6 @@ from nova import log as logging from nova import rpc from nova.network import manager as network_manager -from nova.tests import fake_flags -logging.reset() - FLAGS = flags.FLAGS flags.DEFINE_bool('flush_db', True, diff --git a/run_tests.py b/run_tests.py index cb957da42..6d96454b9 100644 --- a/run_tests.py +++ b/run_tests.py @@ -27,6 +27,8 @@ from nose import result from nose import core from nova import log as logging +from nova.tests import fake_flags + class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): -- cgit From 0f402b72cbf80d1adde503eb532a578944fa0c79 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 16:22:09 -0800 Subject: update based on prereq branch --- nova/tests/fake_flags.py | 2 +- run_tests.sh | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index cfa65c137..575fefff6 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -39,6 +39,6 @@ FLAGS.num_shelves = 2 FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True -FLAGS.sql_connection = 'sqlite:///nova.sqlite' +FLAGS.sql_connection = 'sqlite:///tests.sqlite' FLAGS.use_ipv6 = True FLAGS.logfile = 'tests.log' diff --git a/run_tests.sh b/run_tests.sh index 70212cc6a..e8433bc06 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -39,7 +39,6 @@ done function run_tests { # Just run 
the test suites in current environment - ${wrapper} rm -f nova.sqlite ${wrapper} $NOSETESTS } -- cgit From 3125d978fec27608064dd3dd8d3696f2219fbf12 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Feb 2011 23:26:03 -0800 Subject: use a different flag for listen port for apis --- bin/nova-api | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 11176a021..cb9b41725 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -44,6 +44,8 @@ LOG = logging.getLogger('nova.api') LOG.setLevel(logging.DEBUG) FLAGS = flags.FLAGS +flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') +flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') API_ENDPOINTS = ['ec2', 'osapi'] @@ -60,10 +62,10 @@ def run_app(paste_config_file): wsgi.paste_config_to_flags(config, { "verbose": FLAGS.verbose, "%s_host" % api: config.get('host', '0.0.0.0'), - "%s_port" % api: getattr(FLAGS, "%s_port" % api)}) + "%s_listen_port" % api: getattr(FLAGS, "%s_listen_port" % api)}) LOG.info(_("Running %s API"), api) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_port" % api), + apps.append((app, getattr(FLAGS, "%s_listen_port" % api), getattr(FLAGS, "%s_host" % api))) if len(apps) == 0: LOG.error(_("No known API applications configured in %s."), -- cgit From 9e2942931b5381d3ba0e8cc4f9846160b003f45b Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 22 Feb 2011 17:18:04 +0100 Subject: Get rid of nova-combined, see rationale on ML --- bin/nova-combined | 83 ------------------------------------------------------- 1 file changed, 83 deletions(-) delete mode 100755 bin/nova-combined diff --git a/bin/nova-combined b/bin/nova-combined deleted file mode 100755 index 22f0d5cb7..000000000 --- a/bin/nova-combined +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by 
the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Combined starter script for Nova services.""" - -import eventlet -eventlet.monkey_patch() - -import gettext -import os -import sys - -# If ../nova/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) - -gettext.install('nova', unicode=1) - -from nova import flags -from nova import log as logging -from nova import service -from nova import utils -from nova import wsgi - - -FLAGS = flags.FLAGS - -API_ENDPOINTS = ['ec2', 'osapi'] - -for api in API_ENDPOINTS: - flags.DEFINE_string("%s_listen" % api, "0.0.0.0", - "IP address to listen to for API %s" % api) - flags.DEFINE_integer("%s_listen_port" % api, - getattr(FLAGS, "%s_port" % api), - "Port to listen to for API %s" % api) - -if __name__ == '__main__': - utils.default_flagfile() - FLAGS(sys.argv) - logging.setup() - - compute = service.Service.create(binary='nova-compute') - network = service.Service.create(binary='nova-network') - volume = service.Service.create(binary='nova-volume') - scheduler = service.Service.create(binary='nova-scheduler') - #objectstore = 
service.Service.create(binary='nova-objectstore') - - service.serve(compute, network, volume, scheduler) - - apps = [] - paste_config_file = wsgi.paste_config_file('nova-api.conf') - for api in API_ENDPOINTS: - config = wsgi.load_paste_configuration(paste_config_file, api) - if config is None: - continue - app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_listen_port" % api), - getattr(FLAGS, "%s_listen" % api))) - if len(apps) > 0: - server = wsgi.Server() - for app in apps: - server.start(*app) - server.wait() -- cgit From 912e762c9baf3cb17a24bc0d9feba4b26892dbbc Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 22 Feb 2011 17:37:12 +0100 Subject: Also remove nova-combined from setup.py --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 4ab8f386b..3b48990ac 100644 --- a/setup.py +++ b/setup.py @@ -98,7 +98,6 @@ DistUtilsExtra.auto.setup(name='nova', test_suite='nose.collector', scripts=['bin/nova-ajax-console-proxy', 'bin/nova-api', - 'bin/nova-combined', 'bin/nova-compute', 'bin/nova-console', 'bin/nova-dhcpbridge', -- cgit From 5b2ec209d07d7df45f9b7ca6eebfcbc9443de94e Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 22 Feb 2011 17:10:34 -0800 Subject: don't make a syslog handler if we didn't ask for one --- nova/log.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/log.py b/nova/log.py index 10c14d74b..591d26c63 100644 --- a/nova/log.py +++ b/nova/log.py @@ -236,16 +236,17 @@ class NovaRootLogger(NovaLogger): def __init__(self, name, level=NOTSET): self.logpath = None self.filelog = None - self.syslog = SysLogHandler(address='/dev/log') self.streamlog = StreamHandler() + self.syslog = None NovaLogger.__init__(self, name, level) def setup_from_flags(self): """Setup logger from flags""" global _filelog if FLAGS.use_syslog: + self.syslog = SysLogHandler(address='/dev/log') self.addHandler(self.syslog) - else: + elif self.syslog: 
self.removeHandler(self.syslog) logpath = _get_log_file_path() if logpath: -- cgit From 18793c2e184713d33bc93306d464cf443584ffd6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 22 Feb 2011 17:44:07 -0800 Subject: test that shows error on filtering groups --- nova/tests/test_cloud.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 445cc6e8b..2bce64353 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -133,6 +133,20 @@ class CloudTestCase(test.TestCase): db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) + def test_describe_security_groups(self): + """Makes sure describe_security_groups works and filters results.""" + sec = db.security_group_create(self.context, {'name': 'test'}) + result = self.cloud.describe_security_groups(self.context) + # NOTE(vish): should have the default group as well + self.assertEqual(len(result['securityGroupInfo']), 2) + result = self.cloud.describe_security_groups(self.context, + group_name=[sec['name']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + cloud.ec2_id_to_id(result['securityGroupInfo'][0]['name']), + sec['name']) + db.security_group_destroy(self.context, sec['id']) + def test_describe_volumes(self): """Makes sure describe_volumes works and filters results.""" vol1 = db.volume_create(self.context, {}) @@ -286,19 +300,6 @@ class CloudTestCase(test.TestCase): LOG.debug(_("Terminating instance %s"), instance_id) rv = self.compute.terminate_instance(instance_id) - def test_describe_instances(self): - """Makes sure describe_instances works.""" - instance1 = db.instance_create(self.context, {'host': 'host2'}) - comp1 = db.service_create(self.context, {'host': 'host2', - 'availability_zone': 'zone1', - 'topic': "compute"}) - result = self.cloud.describe_instances(self.context) - self.assertEqual(result['reservationSet'][0] - 
['instancesSet'][0] - ['placement']['availabilityZone'], 'zone1') - db.instance_destroy(self.context, instance1['id']) - db.service_destroy(self.context, comp1['id']) - def test_instance_update_state(self): # TODO(termie): what is this code even testing? def instance(num): -- cgit From 2610a522d26351686612058a6da0300bce731112 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 22 Feb 2011 17:49:38 -0800 Subject: fix test --- nova/tests/test_cloud.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 2bce64353..afdbb80a9 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -135,7 +135,9 @@ class CloudTestCase(test.TestCase): def test_describe_security_groups(self): """Makes sure describe_security_groups works and filters results.""" - sec = db.security_group_create(self.context, {'name': 'test'}) + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) result = self.cloud.describe_security_groups(self.context) # NOTE(vish): should have the default group as well self.assertEqual(len(result['securityGroupInfo']), 2) @@ -143,7 +145,7 @@ class CloudTestCase(test.TestCase): group_name=[sec['name']]) self.assertEqual(len(result['securityGroupInfo']), 1) self.assertEqual( - cloud.ec2_id_to_id(result['securityGroupInfo'][0]['name']), + result['securityGroupInfo'][0]['groupName'], sec['name']) db.security_group_destroy(self.context, sec['id']) -- cgit From 828e3ea3f29f57767a4e25ad40b275c886cb7968 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 22 Feb 2011 18:02:25 -0800 Subject: fix and optimize security group filtering --- nova/api/ec2/cloud.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 882cdcfc9..fc9c13d91 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -318,14 +318,19 @@ class CloudController(object): 
def describe_security_groups(self, context, group_name=None, **kwargs): self.compute_api.ensure_default_security_group(context) - if context.is_admin: + if group_name: + groups = [] + for name in group_name: + group = db.security_group_get_by_name(context, + context.project_id, + name) + groups.append(group) + elif context.is_admin: groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, context.project_id) groups = [self._format_security_group(context, g) for g in groups] - if not group_name is None: - groups = [g for g in groups if g.name in group_name] return {'securityGroupInfo': list(sorted(groups, -- cgit From 2fd33bdd50b933dc14fea065c823f5a73324129b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 22 Feb 2011 18:04:08 -0800 Subject: separate out smoketests and add updated nova.sh --- contrib/nova.sh | 12 +- smoketests/base.py | 12 + smoketests/public_network_smoketests.py | 11 +- smoketests/sysadmin_smoketests.py | 293 +++++++++++++++++++++++ smoketests/user_smoketests.py | 397 -------------------------------- 5 files changed, 321 insertions(+), 404 deletions(-) create mode 100644 smoketests/sysadmin_smoketests.py delete mode 100644 smoketests/user_smoketests.py diff --git a/contrib/nova.sh b/contrib/nova.sh index 9259035ca..1187f2728 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -66,7 +66,7 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y user-mode-linux kvm libvirt-bin sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server sudo apt-get install -y lvm2 iscsitarget open-iscsi - sudo apt-get install -y socat + sudo apt-get install -y socat unzip echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget sudo /etc/init.d/iscsitarget restart sudo modprobe kvm @@ -111,8 +111,7 @@ if [ "$CMD" == "run" ]; then --nodaemon --dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf --network_manager=nova.network.manager.$NET_MAN ---cc_host=$HOST_IP ---routing_source_ip=$HOST_IP 
+--my_ip=$HOST_IP --sql_connection=$SQL_CONN --auth_driver=nova.auth.$AUTH --libvirt_type=$LIBVIRT_TYPE @@ -151,7 +150,6 @@ NOVA_CONF_EOF mkdir -p $NOVA_DIR/instances rm -rf $NOVA_DIR/networks mkdir -p $NOVA_DIR/networks - $NOVA_DIR/tools/clean-vlans if [ ! -d "$NOVA_DIR/images" ]; then ln -s $DIR/images $NOVA_DIR/images fi @@ -169,10 +167,14 @@ NOVA_CONF_EOF # create a project called 'admin' with project manager of 'admin' $NOVA_DIR/bin/nova-manage project create admin admin # export environment variables for project 'admin' and user 'admin' - $NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc + $NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip + unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/ # create a small network $NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32 + # create some floating ips + $NOVA_DIR/bin/nova-manage floating create `hostname` 10.6.0.0/27 + # nova api crashes if we start it with a regular screen command, # so send the start command by forcing text into the window. 
screen_it api "$NOVA_DIR/bin/nova-api" diff --git a/smoketests/base.py b/smoketests/base.py index afc618074..204b4a1eb 100644 --- a/smoketests/base.py +++ b/smoketests/base.py @@ -28,7 +28,9 @@ from boto.ec2.regioninfo import RegionInfo from smoketests import flags +SUITE_NAMES = '[image, instance, volume]' FLAGS = flags.FLAGS +flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES) boto_v6 = None @@ -173,6 +175,16 @@ class SmokeTestCase(unittest.TestCase): return True +TEST_DATA = {} + + +class UserSmokeTestCase(SmokeTestCase): + def setUp(self): + global TEST_DATA + self.conn = self.connection_for_env() + self.data = TEST_DATA + + def run_tests(suites): argv = FLAGS(sys.argv) if FLAGS.use_ipv6: diff --git a/smoketests/public_network_smoketests.py b/smoketests/public_network_smoketests.py index bfc2b20ba..5a4c67642 100644 --- a/smoketests/public_network_smoketests.py +++ b/smoketests/public_network_smoketests.py @@ -24,9 +24,16 @@ import sys import time import unittest +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + from smoketests import flags from smoketests import base -from smoketests import user_smoketests #Note that this test should run from #public network (outside of private network segments) @@ -42,7 +49,7 @@ TEST_KEY2 = '%s_key2' % TEST_PREFIX TEST_DATA = {} -class InstanceTestsFromPublic(user_smoketests.UserSmokeTestCase): +class InstanceTestsFromPublic(base.UserSmokeTestCase): def test_001_can_create_keypair(self): key = self.create_key_pair(self.conn, TEST_KEY) self.assertEqual(key.name, TEST_KEY) diff --git a/smoketests/sysadmin_smoketests.py b/smoketests/sysadmin_smoketests.py new file mode 100644 index 000000000..e3b84d3d3 --- /dev/null +++ b/smoketests/sysadmin_smoketests.py @@ -0,0 +1,293 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import commands +import os +import random +import sys +import time +import unittest + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from smoketests import flags +from smoketests import base + + + +FLAGS = flags.FLAGS +flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz', + 'Local kernel file to use for bundling tests') +flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image', + 'Local image file to use for bundling tests') + +TEST_PREFIX = 'test%s' % int(random.random() * 1000000) +TEST_BUCKET = '%s_bucket' % TEST_PREFIX +TEST_KEY = '%s_key' % TEST_PREFIX +TEST_GROUP = '%s_group' % TEST_PREFIX +class ImageTests(base.UserSmokeTestCase): + def test_001_can_bundle_image(self): + self.assertTrue(self.bundle_image(FLAGS.bundle_image)) + + def test_002_can_upload_image(self): + self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_image)) + + def test_003_can_register_image(self): + image_id = self.conn.register_image('%s/%s.manifest.xml' % + (TEST_BUCKET, FLAGS.bundle_image)) + self.assert_(image_id is not None) + self.data['image_id'] = image_id + + def test_004_can_bundle_kernel(self): + self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True)) + + def test_005_can_upload_kernel(self): + self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_kernel)) + + def test_006_can_register_kernel(self): + kernel_id = self.conn.register_image('%s/%s.manifest.xml' % + (TEST_BUCKET, FLAGS.bundle_kernel)) + self.assert_(kernel_id is not None) + self.data['kernel_id'] = kernel_id + + def test_007_images_are_available_within_10_seconds(self): + for i in xrange(10): + image = self.conn.get_image(self.data['image_id']) + if image and image.state == 'available': + break + time.sleep(1) + else: + self.assert_(False) # wasn't available within 10 seconds + self.assert_(image.type == 'machine') + + for i in xrange(10): + kernel = 
self.conn.get_image(self.data['kernel_id']) + if kernel and kernel.state == 'available': + break + time.sleep(1) + else: + self.assert_(False) # wasn't available within 10 seconds + self.assert_(kernel.type == 'kernel') + + def test_008_can_describe_image_attribute(self): + attrs = self.conn.get_image_attribute(self.data['image_id'], + 'launchPermission') + self.assert_(attrs.name, 'launch_permission') + + def test_009_can_modify_image_launch_permission(self): + self.conn.modify_image_attribute(image_id=self.data['image_id'], + operation='add', + attribute='launchPermission', + groups='all') + image = self.conn.get_image(self.data['image_id']) + self.assertEqual(image.id, self.data['image_id']) + + def test_010_can_see_launch_permission(self): + attrs = self.conn.get_image_attribute(self.data['image_id'], + 'launchPermission') + self.assert_(attrs.name, 'launch_permission') + self.assert_(attrs.attrs['groups'][0], 'all') + + def test_011_user_can_deregister_kernel(self): + self.assertTrue(self.conn.deregister_image(self.data['kernel_id'])) + + def test_012_can_deregister_image(self): + self.assertTrue(self.conn.deregister_image(self.data['image_id'])) + + def test_013_can_delete_bundle(self): + self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET)) + + +class InstanceTests(base.UserSmokeTestCase): + def test_001_can_create_keypair(self): + key = self.create_key_pair(self.conn, TEST_KEY) + self.assertEqual(key.name, TEST_KEY) + + def test_002_can_create_instance_with_keypair(self): + reservation = self.conn.run_instances(FLAGS.test_image, + key_name=TEST_KEY, + instance_type='m1.tiny') + self.assertEqual(len(reservation.instances), 1) + self.data['instance'] = reservation.instances[0] + + def test_003_instance_runs_within_60_seconds(self): + instance = self.data['instance'] + # allow 60 seconds to exit pending with IP + if not self.wait_for_running(self.data['instance']): + self.fail('instance failed to start') + self.data['instance'].update() + ip = 
self.data['instance'].private_dns_name + self.failIf(ip == '0.0.0.0') + if FLAGS.use_ipv6: + ipv6 = self.data['instance'].dns_name_v6 + self.failIf(ipv6 is None) + + def test_004_can_ping_private_ip(self): + if not self.wait_for_ping(self.data['instance'].private_dns_name): + self.fail('could not ping instance') + + if FLAGS.use_ipv6: + if not self.wait_for_ping(self.data['instance'].ip_v6, "ping6"): + self.fail('could not ping instance v6') + + def test_005_can_ssh_to_private_ip(self): + if not self.wait_for_ssh(self.data['instance'].private_dns_name, + TEST_KEY): + self.fail('could not ssh to instance') + + if FLAGS.use_ipv6: + if not self.wait_for_ssh(self.data['instance'].ip_v6, + TEST_KEY): + self.fail('could not ssh to instance v6') + + def test_999_tearDown(self): + self.delete_key_pair(self.conn, TEST_KEY) + self.conn.terminate_instances([self.data['instance'].id]) + + +class VolumeTests(base.UserSmokeTestCase): + def setUp(self): + super(VolumeTests, self).setUp() + self.device = '/dev/vdb' + + def test_000_setUp(self): + self.create_key_pair(self.conn, TEST_KEY) + reservation = self.conn.run_instances(FLAGS.test_image, + instance_type='m1.tiny', + key_name=TEST_KEY) + self.data['instance'] = reservation.instances[0] + if not self.wait_for_running(self.data['instance']): + self.fail('instance failed to start') + self.data['instance'].update() + if not self.wait_for_ping(self.data['instance'].private_dns_name): + self.fail('could not ping instance') + if not self.wait_for_ssh(self.data['instance'].private_dns_name, + TEST_KEY): + self.fail('could not ssh to instance') + + def test_001_can_create_volume(self): + volume = self.conn.create_volume(1, 'nova') + self.assertEqual(volume.size, 1) + self.data['volume'] = volume + # Give network time to find volume. 
+ time.sleep(10) + + def test_002_can_attach_volume(self): + volume = self.data['volume'] + + for x in xrange(10): + volume.update() + if volume.status.startswith('available'): + break + time.sleep(1) + else: + self.fail('cannot attach volume with state %s' % volume.status) + + volume.attach(self.data['instance'].id, self.device) + + # wait + for x in xrange(10): + volume.update() + if volume.status.startswith('in-use'): + break + time.sleep(1) + else: + self.fail('volume never got to in use') + + self.assertTrue(volume.status.startswith('in-use')) + + # Give instance time to recognize volume. + time.sleep(10) + + def test_003_can_mount_volume(self): + ip = self.data['instance'].private_dns_name + conn = self.connect_ssh(ip, TEST_KEY) + # NOTE(vish): this will create an dev for images that don't have + # udev rules + stdin, stdout, stderr = conn.exec_command( + 'grep %s /proc/partitions | ' + '`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`' + % self.device.rpartition('/')[2]) + exec_list = [] + exec_list.append('mkdir -p /mnt/vol') + exec_list.append('/sbin/mke2fs %s' % self.device) + exec_list.append('mount %s /mnt/vol' % self.device) + exec_list.append('echo success') + stdin, stdout, stderr = conn.exec_command(' && '.join(exec_list)) + out = stdout.read() + conn.close() + if not out.strip().endswith('success'): + self.fail('Unable to mount: %s %s' % (out, stderr.read())) + + def test_004_can_write_to_volume(self): + ip = self.data['instance'].private_dns_name + conn = self.connect_ssh(ip, TEST_KEY) + # FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted + stdin, stdout, stderr = conn.exec_command( + 'echo hello > /mnt/vol/test.txt') + err = stderr.read() + conn.close() + if len(err) > 0: + self.fail('Unable to write to mount: %s' % (err)) + + def test_005_volume_is_correct_size(self): + ip = self.data['instance'].private_dns_name + conn = self.connect_ssh(ip, TEST_KEY) + stdin, stdout, stderr = conn.exec_command( + "df -h | grep %s | awk 
{'print $2'}" % self.device) + out = stdout.read() + conn.close() + if not out.strip() == '1007.9M': + self.fail('Volume is not the right size: %s %s' % + (out, stderr.read())) + + def test_006_me_can_umount_volume(self): + ip = self.data['instance'].private_dns_name + conn = self.connect_ssh(ip, TEST_KEY) + stdin, stdout, stderr = conn.exec_command('umount /mnt/vol') + err = stderr.read() + conn.close() + if len(err) > 0: + self.fail('Unable to unmount: %s' % (err)) + + def test_007_me_can_detach_volume(self): + result = self.conn.detach_volume(volume_id=self.data['volume'].id) + self.assertTrue(result) + time.sleep(5) + + def test_008_me_can_delete_volume(self): + result = self.conn.delete_volume(self.data['volume'].id) + self.assertTrue(result) + + def test_999_tearDown(self): + self.conn.terminate_instances([self.data['instance'].id]) + self.conn.delete_key_pair(TEST_KEY) + + +if __name__ == "__main__": + suites = {'image': unittest.makeSuite(ImageTests), + 'instance': unittest.makeSuite(InstanceTests), + 'volume': unittest.makeSuite(VolumeTests) + } + sys.exit(base.run_tests(suites)) diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py deleted file mode 100644 index 26f6344f7..000000000 --- a/smoketests/user_smoketests.py +++ /dev/null @@ -1,397 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import commands -import os -import random -import sys -import time -import unittest - -# If ../nova/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from smoketests import flags -from smoketests import base - - -SUITE_NAMES = '[image, instance, volume]' - -FLAGS = flags.FLAGS -flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES) -flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz', - 'Local kernel file to use for bundling tests') -flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image', - 'Local image file to use for bundling tests') - -TEST_PREFIX = 'test%s' % int(random.random() * 1000000) -TEST_BUCKET = '%s_bucket' % TEST_PREFIX -TEST_KEY = '%s_key' % TEST_PREFIX -TEST_GROUP = '%s_group' % TEST_PREFIX -TEST_DATA = {} - - -class UserSmokeTestCase(base.SmokeTestCase): - def setUp(self): - global TEST_DATA - self.conn = self.connection_for_env() - self.data = TEST_DATA - - -class ImageTests(UserSmokeTestCase): - def test_001_can_bundle_image(self): - self.assertTrue(self.bundle_image(FLAGS.bundle_image)) - - def test_002_can_upload_image(self): - self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_image)) - - def test_003_can_register_image(self): - image_id = self.conn.register_image('%s/%s.manifest.xml' % - (TEST_BUCKET, FLAGS.bundle_image)) - self.assert_(image_id is not None) - self.data['image_id'] = image_id - - def test_004_can_bundle_kernel(self): - self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True)) - - def test_005_can_upload_kernel(self): - 
self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_kernel)) - - def test_006_can_register_kernel(self): - kernel_id = self.conn.register_image('%s/%s.manifest.xml' % - (TEST_BUCKET, FLAGS.bundle_kernel)) - self.assert_(kernel_id is not None) - self.data['kernel_id'] = kernel_id - - def test_007_images_are_available_within_10_seconds(self): - for i in xrange(10): - image = self.conn.get_image(self.data['image_id']) - if image and image.state == 'available': - break - time.sleep(1) - else: - self.assert_(False) # wasn't available within 10 seconds - self.assert_(image.type == 'machine') - - for i in xrange(10): - kernel = self.conn.get_image(self.data['kernel_id']) - if kernel and kernel.state == 'available': - break - time.sleep(1) - else: - self.assert_(False) # wasn't available within 10 seconds - self.assert_(kernel.type == 'kernel') - - def test_008_can_describe_image_attribute(self): - attrs = self.conn.get_image_attribute(self.data['image_id'], - 'launchPermission') - self.assert_(attrs.name, 'launch_permission') - - def test_009_can_modify_image_launch_permission(self): - self.conn.modify_image_attribute(image_id=self.data['image_id'], - operation='add', - attribute='launchPermission', - groups='all') - image = self.conn.get_image(self.data['image_id']) - self.assertEqual(image.id, self.data['image_id']) - - def test_010_can_see_launch_permission(self): - attrs = self.conn.get_image_attribute(self.data['image_id'], - 'launchPermission') - self.assert_(attrs.name, 'launch_permission') - self.assert_(attrs.attrs['groups'][0], 'all') - - def test_011_user_can_deregister_kernel(self): - self.assertTrue(self.conn.deregister_image(self.data['kernel_id'])) - - def test_012_can_deregister_image(self): - self.assertTrue(self.conn.deregister_image(self.data['image_id'])) - - def test_013_can_delete_bundle(self): - self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET)) - - -class InstanceTests(UserSmokeTestCase): - def test_001_can_create_keypair(self): - key 
= self.create_key_pair(self.conn, TEST_KEY) - self.assertEqual(key.name, TEST_KEY) - - def test_002_can_create_instance_with_keypair(self): - reservation = self.conn.run_instances(FLAGS.test_image, - key_name=TEST_KEY, - instance_type='m1.tiny') - self.assertEqual(len(reservation.instances), 1) - self.data['instance'] = reservation.instances[0] - - def test_003_instance_runs_within_60_seconds(self): - instance = self.data['instance'] - # allow 60 seconds to exit pending with IP - if not self.wait_for_running(self.data['instance']): - self.fail('instance failed to start') - self.data['instance'].update() - ip = self.data['instance'].private_dns_name - self.failIf(ip == '0.0.0.0') - if FLAGS.use_ipv6: - ipv6 = self.data['instance'].dns_name_v6 - self.failIf(ipv6 is None) - - def test_004_can_ping_private_ip(self): - if not self.wait_for_ping(self.data['instance'].private_dns_name): - self.fail('could not ping instance') - - if FLAGS.use_ipv6: - if not self.wait_for_ping(self.data['instance'].ip_v6, "ping6"): - self.fail('could not ping instance v6') - - def test_005_can_ssh_to_private_ip(self): - if not self.wait_for_ssh(self.data['instance'].private_dns_name, - TEST_KEY): - self.fail('could not ssh to instance') - - if FLAGS.use_ipv6: - if not self.wait_for_ssh(self.data['instance'].ip_v6, - TEST_KEY): - self.fail('could not ssh to instance v6') - - def test_006_can_allocate_elastic_ip(self): - result = self.conn.allocate_address() - self.assertTrue(hasattr(result, 'public_ip')) - self.data['public_ip'] = result.public_ip - - def test_007_can_associate_ip_with_instance(self): - result = self.conn.associate_address(self.data['instance'].id, - self.data['public_ip']) - self.assertTrue(result) - - def test_008_can_ssh_with_public_ip(self): - if not self.wait_for_ssh(self.data['public_ip'], TEST_KEY): - self.fail('could not ssh to public ip') - - def test_009_can_disassociate_ip_from_instance(self): - result = self.conn.disassociate_address(self.data['public_ip']) - 
self.assertTrue(result) - - def test_010_can_deallocate_elastic_ip(self): - result = self.conn.release_address(self.data['public_ip']) - self.assertTrue(result) - - def test_999_tearDown(self): - self.delete_key_pair(self.conn, TEST_KEY) - self.conn.terminate_instances([self.data['instance'].id]) - - -class VolumeTests(UserSmokeTestCase): - def setUp(self): - super(VolumeTests, self).setUp() - self.device = '/dev/vdb' - - def test_000_setUp(self): - self.create_key_pair(self.conn, TEST_KEY) - reservation = self.conn.run_instances(FLAGS.test_image, - instance_type='m1.tiny', - key_name=TEST_KEY) - self.data['instance'] = reservation.instances[0] - if not self.wait_for_running(self.data['instance']): - self.fail('instance failed to start') - self.data['instance'].update() - if not self.wait_for_ping(self.data['instance'].private_dns_name): - self.fail('could not ping instance') - if not self.wait_for_ssh(self.data['instance'].private_dns_name, - TEST_KEY): - self.fail('could not ssh to instance') - - def test_001_can_create_volume(self): - volume = self.conn.create_volume(1, 'nova') - self.assertEqual(volume.size, 1) - self.data['volume'] = volume - # Give network time to find volume. - time.sleep(10) - - def test_002_can_attach_volume(self): - volume = self.data['volume'] - - for x in xrange(10): - volume.update() - if volume.status.startswith('available'): - break - time.sleep(1) - else: - self.fail('cannot attach volume with state %s' % volume.status) - - volume.attach(self.data['instance'].id, self.device) - - # wait - for x in xrange(10): - volume.update() - if volume.status.startswith('in-use'): - break - time.sleep(1) - else: - self.fail('volume never got to in use') - - self.assertTrue(volume.status.startswith('in-use')) - - # Give instance time to recognize volume. 
- time.sleep(10) - - def test_003_can_mount_volume(self): - ip = self.data['instance'].private_dns_name - conn = self.connect_ssh(ip, TEST_KEY) - # NOTE(vish): this will create an dev for images that don't have - # udev rules - stdin, stdout, stderr = conn.exec_command( - 'grep %s /proc/partitions | ' - '`awk \'{print "mknod /dev/"\\$4" b "\\$1" "\\$2}\'`' - % self.device.rpartition('/')[2]) - exec_list = [] - exec_list.append('mkdir -p /mnt/vol') - exec_list.append('/sbin/mke2fs %s' % self.device) - exec_list.append('mount %s /mnt/vol' % self.device) - exec_list.append('echo success') - stdin, stdout, stderr = conn.exec_command(' && '.join(exec_list)) - out = stdout.read() - conn.close() - if not out.strip().endswith('success'): - self.fail('Unable to mount: %s %s' % (out, stderr.read())) - - def test_004_can_write_to_volume(self): - ip = self.data['instance'].private_dns_name - conn = self.connect_ssh(ip, TEST_KEY) - # FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted - stdin, stdout, stderr = conn.exec_command( - 'echo hello > /mnt/vol/test.txt') - err = stderr.read() - conn.close() - if len(err) > 0: - self.fail('Unable to write to mount: %s' % (err)) - - def test_005_volume_is_correct_size(self): - ip = self.data['instance'].private_dns_name - conn = self.connect_ssh(ip, TEST_KEY) - stdin, stdout, stderr = conn.exec_command( - "df -h | grep %s | awk {'print $2'}" % self.device) - out = stdout.read() - conn.close() - if not out.strip() == '1007.9M': - self.fail('Volume is not the right size: %s %s' % - (out, stderr.read())) - - def test_006_me_can_umount_volume(self): - ip = self.data['instance'].private_dns_name - conn = self.connect_ssh(ip, TEST_KEY) - stdin, stdout, stderr = conn.exec_command('umount /mnt/vol') - err = stderr.read() - conn.close() - if len(err) > 0: - self.fail('Unable to unmount: %s' % (err)) - - def test_007_me_can_detach_volume(self): - result = self.conn.detach_volume(volume_id=self.data['volume'].id) - 
self.assertTrue(result) - time.sleep(5) - - def test_008_me_can_delete_volume(self): - result = self.conn.delete_volume(self.data['volume'].id) - self.assertTrue(result) - - def test_999_tearDown(self): - self.conn.terminate_instances([self.data['instance'].id]) - self.conn.delete_key_pair(TEST_KEY) - - -class SecurityGroupTests(UserSmokeTestCase): - - def __public_instance_is_accessible(self): - id_url = "latest/meta-data/instance-id" - options = "-s --max-time 1" - command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url) - instance_id = commands.getoutput(command).strip() - if not instance_id: - return False - if instance_id != self.data['instance_id']: - raise Exception("Wrong instance id") - return True - - def test_001_can_create_security_group(self): - self.conn.create_security_group(TEST_GROUP, description='test') - - groups = self.conn.get_all_security_groups() - self.assertTrue(TEST_GROUP in [group.name for group in groups]) - - def test_002_can_launch_instance_in_security_group(self): - self.create_key_pair(self.conn, TEST_KEY) - reservation = self.conn.run_instances(FLAGS.test_image, - key_name=TEST_KEY, - security_groups=[TEST_GROUP], - instance_type='m1.tiny') - - self.data['instance_id'] = reservation.instances[0].id - - def test_003_can_authorize_security_group_ingress(self): - self.assertTrue(self.conn.authorize_security_group(TEST_GROUP, - ip_protocol='tcp', - from_port=80, - to_port=80)) - - def test_004_can_access_instance_over_public_ip(self): - result = self.conn.allocate_address() - self.assertTrue(hasattr(result, 'public_ip')) - self.data['public_ip'] = result.public_ip - - result = self.conn.associate_address(self.data['instance_id'], - self.data['public_ip']) - start_time = time.time() - while not self.__public_instance_is_accessible(): - # 1 minute to launch - if time.time() - start_time > 60: - raise Exception("Timeout") - time.sleep(1) - - def test_005_can_revoke_security_group_ingress(self): - 
self.assertTrue(self.conn.revoke_security_group(TEST_GROUP, - ip_protocol='tcp', - from_port=80, - to_port=80)) - start_time = time.time() - while self.__public_instance_is_accessible(): - # 1 minute to teardown - if time.time() - start_time > 60: - raise Exception("Timeout") - time.sleep(1) - - def test_999_tearDown(self): - self.conn.delete_key_pair(TEST_KEY) - self.conn.delete_security_group(TEST_GROUP) - groups = self.conn.get_all_security_groups() - self.assertFalse(TEST_GROUP in [group.name for group in groups]) - self.conn.terminate_instances([self.data['instance_id']]) - self.assertTrue(self.conn.release_address(self.data['public_ip'])) - - -if __name__ == "__main__": - suites = {'image': unittest.makeSuite(ImageTests), - 'instance': unittest.makeSuite(InstanceTests), - #'security_group': unittest.makeSuite(SecurityGroupTests), - 'volume': unittest.makeSuite(VolumeTests) - } - sys.exit(base.run_tests(suites)) -- cgit From ef37833e6f45f99b1d16143d29685974a191c387 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 22 Feb 2011 18:04:32 -0800 Subject: add netadmin smoketests --- smoketests/netadmin_smoketests.py | 194 ++++++++++++++++++++++++++++++++++++++ smoketests/proxy.sh | 22 +++++ 2 files changed, 216 insertions(+) create mode 100644 smoketests/netadmin_smoketests.py create mode 100755 smoketests/proxy.sh diff --git a/smoketests/netadmin_smoketests.py b/smoketests/netadmin_smoketests.py new file mode 100644 index 000000000..38beb8fdc --- /dev/null +++ b/smoketests/netadmin_smoketests.py @@ -0,0 +1,194 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import commands +import os +import random +import sys +import time +import unittest + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from smoketests import flags +from smoketests import base + + +FLAGS = flags.FLAGS + +TEST_PREFIX = 'test%s' % int(random.random() * 1000000) +TEST_BUCKET = '%s_bucket' % TEST_PREFIX +TEST_KEY = '%s_key' % TEST_PREFIX +TEST_GROUP = '%s_group' % TEST_PREFIX + + +class AddressTests(base.UserSmokeTestCase): + def test_000_setUp(self): + self.create_key_pair(self.conn, TEST_KEY) + reservation = self.conn.run_instances(FLAGS.test_image, + instance_type='m1.tiny', + key_name=TEST_KEY) + self.data['instance'] = reservation.instances[0] + if not self.wait_for_running(self.data['instance']): + self.fail('instance failed to start') + self.data['instance'].update() + if not self.wait_for_ping(self.data['instance'].private_dns_name): + self.fail('could not ping instance') + if not self.wait_for_ssh(self.data['instance'].private_dns_name, + TEST_KEY): + self.fail('could not ssh to instance') + + def test_001_can_allocate_floating_ip(self): + result = self.conn.allocate_address() + self.assertTrue(hasattr(result, 'public_ip')) + self.data['public_ip'] = result.public_ip + + def 
test_002_can_associate_ip_with_instance(self): + result = self.conn.associate_address(self.data['instance'].id, + self.data['public_ip']) + self.assertTrue(result) + + def test_003_can_ssh_with_public_ip(self): + ssh_authorized = False + groups = self.conn.get_all_security_groups(['default']) + for rule in groups[0].rules: + if (rule.ip_protocol == 'tcp' and + rule.from_port <= 22 and rule.to_port >= 22): + ssh_authorized = True + if not ssh_authorized: + self.conn.authorize_security_group('default', + ip_protocol='tcp', + from_port=22, + to_port=22) + try: + if not self.wait_for_ssh(self.data['public_ip'], TEST_KEY): + self.fail('could not ssh to public ip') + finally: + if not ssh_authorized: + self.conn.revoke_security_group('default', + ip_protocol='tcp', + from_port=22, + to_port=22) + + def test_004_can_disassociate_ip_from_instance(self): + result = self.conn.disassociate_address(self.data['public_ip']) + self.assertTrue(result) + + def test_005_can_deallocate_floating_ip(self): + result = self.conn.release_address(self.data['public_ip']) + self.assertTrue(result) + + def test_999_tearDown(self): + self.delete_key_pair(self.conn, TEST_KEY) + self.conn.terminate_instances([self.data['instance'].id]) + + +class SecurityGroupTests(base.UserSmokeTestCase): + + def __public_instance_is_accessible(self): + id_url = "latest/meta-data/instance-id" + options = "-s --max-time 1" + command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url) + instance_id = commands.getoutput(command).strip() + if not instance_id: + return False + if instance_id != self.data['instance'].id: + raise Exception("Wrong instance id") + return True + + def test_001_can_create_security_group(self): + self.conn.create_security_group(TEST_GROUP, description='test') + + groups = self.conn.get_all_security_groups() + self.assertTrue(TEST_GROUP in [group.name for group in groups]) + + def test_002_can_launch_instance_in_security_group(self): + with open("proxy.sh") as f: + user_data = 
f.read() + self.create_key_pair(self.conn, TEST_KEY) + reservation = self.conn.run_instances(FLAGS.test_image, + key_name=TEST_KEY, + security_groups=[TEST_GROUP], + user_data=user_data, + instance_type='m1.tiny') + + self.data['instance'] = reservation.instances[0] + if not self.wait_for_running(self.data['instance']): + self.fail('instance failed to start') + self.data['instance'].update() + if not self.wait_for_ping(self.data['instance'].private_dns_name): + self.fail('could not ping instance') + if not self.wait_for_ssh(self.data['instance'].private_dns_name, + TEST_KEY): + self.fail('could not ssh to instance') + + def test_003_can_authorize_security_group_ingress(self): + self.assertTrue(self.conn.authorize_security_group(TEST_GROUP, + ip_protocol='tcp', + from_port=80, + to_port=80)) + + def test_004_can_access_metadata_over_public_ip(self): + result = self.conn.allocate_address() + self.assertTrue(hasattr(result, 'public_ip')) + self.data['public_ip'] = result.public_ip + + result = self.conn.associate_address(self.data['instance'].id, + self.data['public_ip']) + start_time = time.time() + try: + while not self.__public_instance_is_accessible(): + # 1 minute to launch + if time.time() - start_time > 60: + raise Exception("Timeout") + time.sleep(1) + finally: + result = self.conn.disassociate_address(self.data['public_ip']) + + def test_005_can_revoke_security_group_ingress(self): + self.assertTrue(self.conn.revoke_security_group(TEST_GROUP, + ip_protocol='tcp', + from_port=80, + to_port=80)) + start_time = time.time() + while self.__public_instance_is_accessible(): + # 1 minute to teardown + if time.time() - start_time > 60: + raise Exception("Timeout") + time.sleep(1) + + def test_999_tearDown(self): + self.conn.delete_key_pair(TEST_KEY) + self.conn.delete_security_group(TEST_GROUP) + groups = self.conn.get_all_security_groups() + self.assertFalse(TEST_GROUP in [group.name for group in groups]) + self.conn.terminate_instances([self.data['instance'].id]) + 
self.assertTrue(self.conn.release_address(self.data['public_ip'])) + + +if __name__ == "__main__": + suites = {'address': unittest.makeSuite(AddressTests), + 'security_group': unittest.makeSuite(SecurityGroupTests) + } + sys.exit(base.run_tests(suites)) diff --git a/smoketests/proxy.sh b/smoketests/proxy.sh new file mode 100755 index 000000000..9b3f3108a --- /dev/null +++ b/smoketests/proxy.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# This is a simple shell script that uses netcat to set up a proxy to the +# metadata server on port 80 and to a google ip on port 8080. This is meant +# to be passed in by a script to an instance via user data, so that +# automatic testing of network connectivity can be performed. + +# Example usage: +# euca-run-instances -t m1.tiny -f proxy.sh ami-tty + +mkfifo backpipe1 +mkfifo backpipe2 + +# NOTE(vish): proxy metadata on port 80 +while true; do + nc -l -p 80 0backpipe1 +done & + +# NOTE(vish): proxy google on port 8080 +while true; do + nc -l -p 8080 0backpipe2 +done & -- cgit From 9f169fdef93898097e33b5e1c0318f543ced672e Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 18:41:41 -0800 Subject: Reverted change to focus on the core bug - kernel_id and ramdisk_id are optional --- nova/api/openstack/servers.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 11a84687d..41b05cbb4 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,11 +144,13 @@ class Controller(wsgi.Controller): metadata stored in Glance as 'image_properties' """ def lookup(param): - properties = image.get('properties') - if properties: - return properties.get(param) - else: - return image.get(param) + _image_id = image_id + try: + return image['properties'][param] + except KeyError: + LOG.debug( + _("%(param)s property not found for image %(_image_id)s") % + locals()) image_id = str(image_id) image = 
self._image_service.show(req.environ['nova.context'], image_id) -- cgit From 3ef3dfc2f6c8b9cc14119793df4990432ff74ea2 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 22 Feb 2011 18:42:23 -0800 Subject: Return null if no kernel_id / ramdisk_id --- nova/api/openstack/servers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 41b05cbb4..d83bd34ab 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -151,6 +151,7 @@ class Controller(wsgi.Controller): LOG.debug( _("%(param)s property not found for image %(_image_id)s") % locals()) + return None image_id = str(image_id) image = self._image_service.show(req.environ['nova.context'], image_id) -- cgit From 409ee5ff22bbd62d94a7afb1df1e6b7353c95d83 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 22 Feb 2011 23:42:49 -0500 Subject: Update the admin client to deal with VPNs and have a function host list. --- nova/adminclient.py | 63 ++++++++++++++++++++++++---- nova/api/ec2/admin.py | 111 ++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 152 insertions(+), 22 deletions(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index c614b274c..fe2aca351 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -23,6 +23,8 @@ import base64 import boto import boto.exception import httplib +import re +import string from boto.ec2.regioninfo import RegionInfo @@ -165,19 +167,20 @@ class HostInfo(object): **Fields Include** - * Disk stats - * Running Instances - * Memory stats - * CPU stats - * Network address info - * Firewall info - * Bridge and devices - + * Hostname + * Compute service status + * Volume service status + * Instance count + * Volume count """ def __init__(self, connection=None): self.connection = connection self.hostname = None + self.compute = None + self.volume = None + self.instance_count = 0 + self.volume_count = 0 def __repr__(self): return 'Host:%s' % self.hostname @@ 
-188,7 +191,39 @@ class HostInfo(object): # this is needed by the sax parser, so ignore the ugly name def endElement(self, name, value, connection): - setattr(self, name, value) + fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name)) + setattr(self, fixed_name, value) + + +class Vpn(object): + """ + Information about a Vpn, as parsed through SAX + + **Fields Include** + + * instance_id + * project_id + * public_ip + * public_port + * created_at + * internal_ip + * state + """ + + def __init__(self, connection=None): + self.connection = connection + self.instance_id = None + self.project_id = None + + def __repr__(self): + return 'Vpn:%s:%s' % (self.project_id, self.instance_id) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + fixed_name = string.lower(re.sub(r'([A-Z])', r'_\1', name)) + setattr(self, fixed_name, value) class InstanceType(object): @@ -422,6 +457,16 @@ class NovaAdminClient(object): zip = self.apiconn.get_object('GenerateX509ForUser', params, UserInfo) return zip.file + def start_vpn(self, project): + """ + Starts the vpn for a user + """ + return self.apiconn.get_object('StartVpn', {'Project': project}, Vpn) + + def get_vpns(self): + """Return a list of vpn with project name""" + return self.apiconn.get_list('DescribeVpns', {}, [('item', Vpn)]) + def get_hosts(self): return self.apiconn.get_list('DescribeHosts', {}, [('item', HostInfo)]) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 735951082..e2a05fce1 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,14 +21,18 @@ Admin API controller, exposed through http via the api worker. 
""" import base64 +import datetime from nova import db from nova import exception +from nova import flags from nova import log as logging +from nova import utils from nova.auth import manager from nova.compute import instance_types +FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.ec2.admin') @@ -55,12 +59,25 @@ def project_dict(project): return {} -def host_dict(host): +def host_dict(host, compute_service, instances, volume_service, volumes, now): """Convert a host model object to a result dict""" - if host: - return host.state - else: - return {} + rv = {'hostanme': host, 'instance_count': len(instances), + 'volume_count': len(volumes)} + if compute_service: + latest = compute_service['updated_at'] or compute_service['created_at'] + delta = now - latest + if delta.seconds <= FLAGS.service_down_time: + rv['compute'] = 'up' + else: + rv['compute'] = 'down' + if volume_service: + latest = volume_service['updated_at'] or volume_service['created_at'] + delta = now - latest + if delta.seconds <= FLAGS.service_down_time: + rv['volume'] = 'up' + else: + rv['volume'] = 'down' + return rv def instance_dict(name, inst): @@ -71,6 +88,25 @@ def instance_dict(name, inst): 'flavor_id': inst['flavorid']} +def vpn_dict(project, vpn_instance): + rv = {'project_id': project.id, + 'public_ip': project.vpn_ip, + 'public_port': project.vpn_port} + if vpn_instance: + rv['instance_id'] = vpn_instance['ec2_id'] + rv['created_at'] = utils.isotime(vpn_instance['created_at']) + address = vpn_instance.get('fixed_ip', None) + if address: + rv['internal_ip'] = address['address'] + if utils.vpn_ping(project.vpn_ip, project.vpn_port): + rv['state'] = 'running' + else: + rv['state'] = 'down' + else: + rv['state'] = 'pending' + return rv + + class AdminController(object): """ API Controller for users, hosts, nodes, and workers. 
@@ -223,19 +259,68 @@ class AdminController(object): raise exception.ApiError(_('operation must be add or remove')) return True + def _vpn_for(self, context, project_id): + """Get the VPN instance for a project ID.""" + for instance in db.instance_get_all_by_project(context, project_id): + if (instance['image_id'] == FLAGS.vpn_image_id + and not instance['state_description'] in + ['shutting_down', 'shutdown']): + return instance + + def start_vpn(self, context, project): + instance = self._vpn_for(context, project) + if not instance: + # NOTE(vish) import delayed because of __init__.py + from nova.cloudpipe import pipelib + pipe = pipelib.CloudPipe() + try: + pipe.launch_vpn_instance(project) + except db.NoMoreNetworks: + raise exception.ApiError("Unable to claim IP for VPN instance" + ", ensure it isn't running, and try " + "again in a few minutes") + instance = self._vpn_for(context, project) + return {'instance_id': instance['ec2_id']} + + def describe_vpns(self, context): + vpns = [] + for project in manager.AuthManager().get_projects(): + instance = self._vpn_for(context, project.id) + vpns.append(vpn_dict(project, instance)) + return {'items': vpns} + # FIXME(vish): these host commands don't work yet, perhaps some of the # required data can be retrieved from service objects? - def describe_hosts(self, _context, **_kwargs): + def describe_hosts(self, context, **_kwargs): """Returns status info for all nodes. 
Includes: - * Disk Space - * Instance List - * RAM used - * CPU used - * DHCP servers running - * Iptables / bridges + * Hostname + * Compute (up, down, None) + * Instance count + * Volume (up, down, None) + * Volume Count """ - return {'hostSet': [host_dict(h) for h in db.host_get_all()]} + services = db.service_get_all(context) + now = datetime.datetime.utcnow() + hosts = [] + rv = [] + for host in [service['host'] for service in services]: + if not host in hosts: + hosts.append(host) + for host in hosts: + compute = [s for s in services if s['host'] == host \ + and s['binary'] == 'nova-compute'] + if compute: + compute = compute[0] + instances = db.instance_get_all_by_host(context, host) + volume = [s for s in services if s['host'] == host \ + and s['binary'] == 'nova-volume'] + if volume: + volume = volume[0] + volumes = db.volume_get_all_by_host(context, host) + rv.append(host_dict(host, compute, instances, volume, volumes, + now)) + return {'hosts': rv} def describe_host(self, _context, name, **_kwargs): """Returns status info for single node.""" -- cgit From 943b863bef09a4e2b3de36c26a3fabbcc6093411 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 22 Feb 2011 23:21:01 -0800 Subject: Lots of test fixing --- nova/api/ec2/cloud.py | 5 ++- nova/compute/api.py | 2 +- nova/db/sqlalchemy/api.py | 3 +- nova/tests/api/openstack/test_servers.py | 2 +- nova/tests/test_cloud.py | 75 ++++++++------------------------ nova/tests/test_network.py | 3 ++ nova/tests/test_scheduler.py | 3 ++ nova/tests/test_virt.py | 3 ++ nova/virt/fake.py | 4 +- 9 files changed, 36 insertions(+), 64 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 882cdcfc9..99b6d5cb6 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -529,8 +529,9 @@ class CloudController(object): def get_ajax_console(self, context, instance_id, **kwargs): ec2_id = instance_id[0] - internal_id = ec2_id_to_id(ec2_id) - return self.compute_api.get_ajax_console(context, 
internal_id) + instance_id = ec2_id_to_id(ec2_id) + return self.compute_api.get_ajax_console(context, + instance_id=instance_id) def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: diff --git a/nova/compute/api.py b/nova/compute/api.py index 81ea6dc53..0caadc32e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -447,7 +447,7 @@ class API(base.Base): {'method': 'authorize_ajax_console', 'args': {'token': output['token'], 'host': output['host'], 'port': output['port']}}) - return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url, + return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url, output['token'])} def get_console_output(self, context, instance_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2697fac73..2ab402e1c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1046,7 +1046,8 @@ def network_create_safe(context, values): @require_admin_context def network_disassociate(context, network_id): - network_update(context, network_id, {'project_id': None}) + network_update(context, network_id, {'project_id': None, + 'host': None}) @require_admin_context diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a7be0796e..589f3d3eb 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -84,7 +84,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None): "vcpus": 0, "local_gb": 0, "hostname": "", - "host": "", + "host": None, "instance_type": "", "user_data": "", "reservation_id": "", diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1824d24bc..2c6dc5973 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -66,6 +66,7 @@ class CloudTestCase(test.TestCase): # set up services self.compute = self.start_service('compute') + self.scheduter = self.start_service('scheduler') self.network = self.start_service('network') 
self.manager = manager.AuthManager() @@ -73,8 +74,12 @@ class CloudTestCase(test.TestCase): self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, project=self.project) + host = self.network.get_network_host(self.context.elevated()) def tearDown(self): + network_ref = db.project_get_network(self.context, + self.project.id) + db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) self.compute.kill() @@ -201,27 +206,32 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) + greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, - instance_id=[instance_id]) + instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) + greenthread.sleep(0.3) def test_ajax_console(self): + image_id = FLAGS.default_image kwargs = {'image_id': image_id} - rv = yield self.cloud.run_instances(self.context, **kwargs) + rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - output = yield self.cloud.get_console_output(context=self.context, - instance_id=[instance_id]) - self.assertEquals(b64decode(output['output']), - 'http://fakeajaxconsole.com/?token=FAKETOKEN') + greenthread.sleep(0.3) + output = self.cloud.get_ajax_console(context=self.context, + instance_id=[instance_id]) + self.assertEquals(output['url'], + '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. 
greenthread.sleep(0.3) - rv = yield self.cloud.terminate_instances(self.context, [instance_id]) + rv = self.cloud.terminate_instances(self.context, [instance_id]) + greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') @@ -297,57 +307,6 @@ class CloudTestCase(test.TestCase): db.instance_destroy(self.context, instance1['id']) db.service_destroy(self.context, comp1['id']) - def test_instance_update_state(self): - # TODO(termie): what is this code even testing? - def instance(num): - return { - 'reservation_id': 'r-1', - 'instance_id': 'i-%s' % num, - 'image_id': 'ami-%s' % num, - 'private_dns_name': '10.0.0.%s' % num, - 'dns_name': '10.0.0%s' % num, - 'ami_launch_index': str(num), - 'instance_type': 'fake', - 'availability_zone': 'fake', - 'key_name': None, - 'kernel_id': 'fake', - 'ramdisk_id': 'fake', - 'groups': ['default'], - 'product_codes': None, - 'state': 0x01, - 'user_data': ''} - rv = self.cloud._format_describe_instances(self.context) - logging.error(str(rv)) - self.assertEqual(len(rv['reservationSet']), 0) - - # simulate launch of 5 instances - # self.cloud.instances['pending'] = {} - #for i in xrange(5): - # inst = instance(i) - # self.cloud.instances['pending'][inst['instance_id']] = inst - - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - # report 4 nodes each having 1 of the instances - #for i in xrange(4): - # self.cloud.update_state('instances', - # {('node-%s' % i): {('i-%s' % i): - # instance(i)}}) - - # one instance should be pending still - #self.assert_(len(self.cloud.instances['pending'].keys()) == 1) - - # check that the reservations collapse - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - - # check that we can get metadata for each instance - #for i in xrange(4): - # data = 
self.cloud.get_metadata(instance(i)['private_dns_name']) - # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i) - @staticmethod def _fake_set_image_description(ctxt, image_id, description): from nova.objectstore import handler diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 00f9323f3..53cfea276 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -117,6 +117,9 @@ class NetworkTestCase(test.TestCase): utils.to_global_ipv6( network_ref['cidr_v6'], instance_ref['mac_address'])) + self._deallocate_address(0, address) + db.instance_destroy(context.get_admin_context(), + instance_ref['id']) def test_public_network_association(self): """Makes sure that we can allocaate a public ip""" diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 250170072..8e4a4daf5 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -118,6 +118,7 @@ class ZoneSchedulerTestCase(test.TestCase): arg = IgnoreArg() db.service_get_all_by_topic(arg, arg).AndReturn(service_list) self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + self.mox.StubOutWithMock(db, 'instance_create', use_mock_anything=True) rpc.cast(ctxt, 'compute.host1', {'method': 'run_instance', @@ -150,6 +151,7 @@ class SimpleDriverTestCase(test.TestCase): def tearDown(self): self.manager.delete_user(self.user) self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() def _create_instance(self, **kwargs): """Create a test instance""" @@ -270,6 +272,7 @@ class SimpleDriverTestCase(test.TestCase): self.scheduler.driver.schedule_run_instance, self.context, instance_id) + db.instance_destroy(self.context, instance_id) for instance_id in instance_ids1: compute1.terminate_instance(self.context, instance_id) for instance_id in instance_ids2: diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 6e5a0114b..5b3247df9 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py 
@@ -204,6 +204,7 @@ class LibvirtConnTestCase(test.TestCase): conn = libvirt_conn.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, testuri) + db.instance_destroy(user_context, instance_ref['id']) def tearDown(self): super(LibvirtConnTestCase, self).tearDown() @@ -365,6 +366,7 @@ class IptablesFirewallTestCase(test.TestCase): '--dports 80:81 -j ACCEPT' % security_group_chain \ in self.out_rules, "TCP port 80/81 acceptance rule wasn't added") + db.instance_destroy(admin_ctxt, instance_ref['id']) class NWFilterTestCase(test.TestCase): @@ -514,3 +516,4 @@ class NWFilterTestCase(test.TestCase): self.fw.apply_instance_filter(instance) _ensure_all_called() self.teardown_security_group() + db.instance_destroy(admin_ctxt, instance_ref['id']) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 92749f38a..4346dffc1 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -319,7 +319,9 @@ class FakeConnection(object): return 'FAKE CONSOLE OUTPUT' def get_ajax_console(self, instance): - return 'http://fakeajaxconsole.com/?token=FAKETOKEN' + return {'token': 'FAKETOKEN', + 'host': 'fakeajaxconsole.com', + 'port': 6969} def get_console_pool_info(self, console_type): return {'address': '127.0.0.1', -- cgit From 015900b215805808d8cc3138b0f4deb2c0941f76 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 22 Feb 2011 23:30:52 -0800 Subject: remove unnecessary stubout --- nova/tests/test_scheduler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 8e4a4daf5..b6888c4d2 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -118,7 +118,6 @@ class ZoneSchedulerTestCase(test.TestCase): arg = IgnoreArg() db.service_get_all_by_topic(arg, arg).AndReturn(service_list) self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - self.mox.StubOutWithMock(db, 'instance_create', use_mock_anything=True) rpc.cast(ctxt, 'compute.host1', {'method': 'run_instance', -- cgit 
From 60ed7c9c52306d08b1ad3e759e173931b0a495a8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 00:59:15 -0800 Subject: fix failures --- run_tests.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/run_tests.py b/run_tests.py index 4084d8d80..01a1cf8bc 100644 --- a/run_tests.py +++ b/run_tests.py @@ -181,6 +181,7 @@ class NovaTestResult(result.TextTestResult): return str(test) def addSuccess(self, test): + unittest.TestResult.addSuccess(self, test) if self.showAll: self.colorizer.write("OK", 'green') self.stream.writeln() @@ -188,7 +189,8 @@ class NovaTestResult(result.TextTestResult): self.stream.write('.') self.stream.flush() - def addFailure(self, test): + def addFailure(self, test, err): + unittest.TestResult.addFailure(self, test, err) if self.showAll: self.colorizer.write("FAIL", 'red') self.stream.writeln() -- cgit From 2bec58e35ab1f2df543e50d399433f76e98210d7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 01:13:41 -0800 Subject: move db creation into fixtures and clean db for each test --- nova/test.py | 17 ++--------------- nova/tests/__init__.py | 15 +++++++++++++++ nova/tests/api/openstack/__init__.py | 4 ++-- nova/tests/api/openstack/test_adminapi.py | 11 +++++------ nova/tests/api/openstack/test_api.py | 4 ++-- nova/tests/api/openstack/test_auth.py | 14 +++++++------- nova/tests/api/openstack/test_common.py | 5 +++-- nova/tests/api/openstack/test_faults.py | 4 ++-- nova/tests/api/openstack/test_flavors.py | 10 ++++------ nova/tests/api/openstack/test_images.py | 14 ++++++++++---- nova/tests/api/openstack/test_ratelimiting.py | 15 +++++++-------- nova/tests/api/openstack/test_servers.py | 10 ++++------ nova/tests/api/openstack/test_shared_ip_groups.py | 7 ++++--- nova/tests/api/openstack/test_zones.py | 10 ++++------ nova/tests/api/test_wsgi.py | 6 +++--- nova/tests/objectstore_unittest.py | 1 + nova/tests/test_direct.py | 1 + nova/tests/test_scheduler.py | 1 + nova/tests/test_virt.py 
| 3 ++- 19 files changed, 79 insertions(+), 73 deletions(-) diff --git a/nova/test.py b/nova/test.py index bff43b6c7..42accffa7 100644 --- a/nova/test.py +++ b/nova/test.py @@ -26,15 +26,14 @@ import datetime import unittest import mox +import shutil import stubout from nova import context from nova import db from nova import fakerabbit from nova import flags -from nova import log as logging from nova import rpc -from nova.network import manager as network_manager FLAGS = flags.FLAGS @@ -64,15 +63,7 @@ class TestCase(unittest.TestCase): # now that we have some required db setup for the system # to work properly. self.start = datetime.datetime.utcnow() - ctxt = context.get_admin_context() - if db.network_count(ctxt) != 5: - network_manager.VlanManager().create_networks(ctxt, - FLAGS.fixed_range, - 5, 16, - FLAGS.fixed_range_v6, - FLAGS.vlan_start, - FLAGS.vpn_start, - ) + shutil.copyfile("clean.sqlite", "tests.sqlite") # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators @@ -93,9 +84,6 @@ class TestCase(unittest.TestCase): self.mox.VerifyAll() # NOTE(vish): Clean up any ips associated during the test. 
ctxt = context.get_admin_context() - db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, - self.start) - db.network_disassociate_all(ctxt) rpc.Consumer.attach_to_eventlet = self.originalAttach for x in self.injected: try: @@ -106,7 +94,6 @@ class TestCase(unittest.TestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() - db.security_group_destroy_all(ctxt) super(TestCase, self).tearDown() finally: self.reset_flags() diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 592d5bea9..5472bdaf2 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -37,5 +37,20 @@ setattr(__builtin__, '_', lambda x: x) def setup(): + import shutil + from nova import context + from nova import flags from nova.db import migration + from nova.network import manager as network_manager + from nova.tests import fake_flags + FLAGS = flags.FLAGS migration.db_sync() + ctxt = context.get_admin_context() + network_manager.VlanManager().create_networks(ctxt, + FLAGS.fixed_range, + 5, 16, + FLAGS.fixed_range_v6, + FLAGS.vlan_start, + FLAGS.vpn_start, + ) + shutil.copyfile("tests.sqlite", "clean.sqlite") diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 77b1dd37f..e18120285 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -16,7 +16,7 @@ # under the License. 
import webob.dec -import unittest +from nova import test from nova import context from nova import flags @@ -33,7 +33,7 @@ def simple_wsgi(req): return "" -class RateLimitingMiddlewareTest(unittest.TestCase): +class RateLimitingMiddlewareTest(test.TestCase): def test_get_action_name(self): middleware = RateLimitingMiddleware(simple_wsgi) diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py index 73120c31d..dfce1b127 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -15,13 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import stubout import webob from paste import urlmap from nova import flags +from nova import test from nova.api import openstack from nova.api.openstack import ratelimiting from nova.api.openstack import auth @@ -30,9 +30,10 @@ from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS -class AdminAPITest(unittest.TestCase): +class AdminAPITest(test.TestCase): def setUp(self): + super(AdminAPITest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -44,6 +45,7 @@ class AdminAPITest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(AdminAPITest, self).tearDown() def test_admin_enabled(self): FLAGS.allow_admin_api = True @@ -58,8 +60,5 @@ class AdminAPITest(unittest.TestCase): # We should still be able to access public operations. req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are unavailable. 
- -if __name__ == '__main__': - unittest.main() + self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index db0fe1060..5112c486f 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -15,17 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import webob.exc import webob.dec from webob import Request +from nova import test from nova.api import openstack from nova.api.openstack import faults -class APITest(unittest.TestCase): +class APITest(test.TestCase): def _wsgi_app(self, inner_app): # simpler version of the app than fakes.wsgi_app diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 0dd65d321..13f6c3a1c 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -16,7 +16,6 @@ # under the License. import datetime -import unittest import stubout import webob @@ -27,12 +26,14 @@ import nova.api.openstack.auth import nova.auth.manager from nova import auth from nova import context +from nova import test from nova.tests.api.openstack import fakes -class Test(unittest.TestCase): +class Test(test.TestCase): def setUp(self): + super(Test, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) @@ -45,6 +46,7 @@ class Test(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() fakes.fake_data_store = {} + super(Test, self).tearDown() def test_authorize_user(self): f = fakes.FakeAuthManager() @@ -128,8 +130,9 @@ class Test(unittest.TestCase): self.assertEqual(result.status, '401 Unauthorized') -class TestLimiter(unittest.TestCase): +class TestLimiter(test.TestCase): def setUp(self): + super(TestLimiter, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, 
'__init__', fakes.fake_auth_init) @@ -141,6 +144,7 @@ class TestLimiter(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() fakes.fake_data_store = {} + super(TestLimiter, self).tearDown() def test_authorize_token(self): f = fakes.FakeAuthManager() @@ -161,7 +165,3 @@ class TestLimiter(unittest.TestCase): result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 9d9837cc9..59d850157 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -19,14 +19,14 @@ Test suites for 'common' code used throughout the OpenStack HTTP API. """ -import unittest from webob import Request +from nova import test from nova.api.openstack.common import limited -class LimiterTest(unittest.TestCase): +class LimiterTest(test.TestCase): """ Unit tests for the `nova.api.openstack.common.limited` method which takes in a list of items and, depending on the 'offset' and 'limit' GET params, @@ -37,6 +37,7 @@ class LimiterTest(unittest.TestCase): """ Run before each test. """ + super(LimiterTest, self).setUp() self.tiny = range(1) self.small = range(10) self.medium = range(1000) diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py index fda2b5ede..7667753f4 100644 --- a/nova/tests/api/openstack/test_faults.py +++ b/nova/tests/api/openstack/test_faults.py @@ -15,15 +15,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-import unittest import webob import webob.dec import webob.exc +from nova import test from nova.api.openstack import faults -class TestFaults(unittest.TestCase): +class TestFaults(test.TestCase): def test_fault_parts(self): req = webob.Request.blank('/.xml') diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 1bdaea161..761265965 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -15,18 +15,18 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest - import stubout import webob +from nova import test import nova.api from nova.api.openstack import flavors from nova.tests.api.openstack import fakes -class FlavorsTest(unittest.TestCase): +class FlavorsTest(test.TestCase): def setUp(self): + super(FlavorsTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -36,6 +36,7 @@ class FlavorsTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() + super(FlavorsTest, self).tearDown() def test_get_flavor_list(self): req = webob.Request.blank('/v1.0/flavors') @@ -43,6 +44,3 @@ class FlavorsTest(unittest.TestCase): def test_get_flavor_by_id(self): pass - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 8ab4d7569..e232bc3d5 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import json import datetime -import unittest import stubout import webob @@ -30,6 +29,7 @@ import webob from nova import context from nova import exception from nova import flags +from nova import test from nova import utils import nova.api.openstack from nova.api.openstack import images @@ -130,12 +130,13 @@ class BaseImageServiceTests(object): self.assertEquals(1, num_images) 
-class LocalImageServiceTest(unittest.TestCase, +class LocalImageServiceTest(test.TestCase, BaseImageServiceTests): """Tests the local image service""" def setUp(self): + super(LocalImageServiceTest, self).setUp() self.stubs = stubout.StubOutForTesting() service_class = 'nova.image.local.LocalImageService' self.service = utils.import_object(service_class) @@ -145,14 +146,16 @@ class LocalImageServiceTest(unittest.TestCase, self.service.delete_all() self.service.delete_imagedir() self.stubs.UnsetAll() + super(LocalImageServiceTest, self).tearDown() -class GlanceImageServiceTest(unittest.TestCase, +class GlanceImageServiceTest(test.TestCase, BaseImageServiceTests): """Tests the local image service""" def setUp(self): + super(GlanceImageServiceTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.stub_out_glance(self.stubs) fakes.stub_out_compute_api_snapshot(self.stubs) @@ -163,9 +166,10 @@ class GlanceImageServiceTest(unittest.TestCase, def tearDown(self): self.stubs.UnsetAll() + super(GlanceImageServiceTest, self).tearDown() -class ImageControllerWithGlanceServiceTest(unittest.TestCase): +class ImageControllerWithGlanceServiceTest(test.TestCase): """Test of the OpenStack API /images application controller""" @@ -194,6 +198,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): 'image_type': 'ramdisk'}] def setUp(self): + super(ImageControllerWithGlanceServiceTest, self).setUp() self.orig_image_service = FLAGS.image_service FLAGS.image_service = 'nova.image.glance.GlanceImageService' self.stubs = stubout.StubOutForTesting() @@ -208,6 +213,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.image_service = self.orig_image_service + super(ImageControllerWithGlanceServiceTest, self).tearDown() def test_get_image_index(self): req = webob.Request.blank('/v1.0/images') diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py index 
4c9d6bc23..9ae90ee20 100644 --- a/nova/tests/api/openstack/test_ratelimiting.py +++ b/nova/tests/api/openstack/test_ratelimiting.py @@ -1,15 +1,16 @@ import httplib import StringIO import time -import unittest import webob +from nova import test import nova.api.openstack.ratelimiting as ratelimiting -class LimiterTest(unittest.TestCase): +class LimiterTest(test.TestCase): def setUp(self): + super(LimiterTest, self).setUp() self.limits = { 'a': (5, ratelimiting.PER_SECOND), 'b': (5, ratelimiting.PER_MINUTE), @@ -83,9 +84,10 @@ class FakeLimiter(object): return self._delay -class WSGIAppTest(unittest.TestCase): +class WSGIAppTest(test.TestCase): def setUp(self): + super(WSGIAppTest, self).setUp() self.limiter = FakeLimiter(self) self.app = ratelimiting.WSGIApp(self.limiter) @@ -206,7 +208,7 @@ def wire_HTTPConnection_to_WSGI(host, app): httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) -class WSGIAppProxyTest(unittest.TestCase): +class WSGIAppProxyTest(test.TestCase): def setUp(self): """Our WSGIAppProxy is going to call across an HTTPConnection to a @@ -218,6 +220,7 @@ class WSGIAppProxyTest(unittest.TestCase): at the WSGIApp. And the limiter isn't real -- it's a fake that behaves the way we tell it to. 
""" + super(WSGIAppProxyTest, self).setUp() self.limiter = FakeLimiter(self) app = ratelimiting.WSGIApp(self.limiter) wire_HTTPConnection_to_WSGI('100.100.100.100:80', app) @@ -238,7 +241,3 @@ class WSGIAppProxyTest(unittest.TestCase): self.limiter.mock('murder', 'brutus', None) self.proxy.perform('stab', 'brutus') self.assertRaises(AssertionError, shouldRaise) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a7be0796e..ea29dcf9b 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -17,13 +17,13 @@ import datetime import json -import unittest import stubout import webob from nova import db from nova import flags +from nova import test import nova.api.openstack from nova.api.openstack import servers import nova.db.api @@ -108,9 +108,10 @@ def fake_compute_api(cls, req, id): return True -class ServersTest(unittest.TestCase): +class ServersTest(test.TestCase): def setUp(self): + super(ServersTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -141,6 +142,7 @@ class ServersTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(ServersTest, self).tearDown() def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') @@ -410,7 +412,3 @@ class ServersTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py index c2fc3a203..b4de2ef41 100644 --- a/nova/tests/api/openstack/test_shared_ip_groups.py +++ b/nova/tests/api/openstack/test_shared_ip_groups.py @@ -15,19 +15,20 @@ # License for the specific 
language governing permissions and limitations # under the License. -import unittest - import stubout +from nova import test from nova.api.openstack import shared_ip_groups -class SharedIpGroupsTest(unittest.TestCase): +class SharedIpGroupsTest(test.TestCase): def setUp(self): + super(SharedIpGroupsTest, self).setUp() self.stubs = stubout.StubOutForTesting() def tearDown(self): self.stubs.UnsetAll() + super(SharedIpGroupsTest, self).tearDown() def test_get_shared_ip_groups(self): pass diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index df497ef1b..555b206b9 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import stubout import webob @@ -22,6 +21,7 @@ import json import nova.db from nova import context from nova import flags +from nova import test from nova.api.openstack import zones from nova.tests.api.openstack import fakes @@ -60,8 +60,9 @@ def zone_get_all(context): password='qwerty')] -class ZonesTest(unittest.TestCase): +class ZonesTest(test.TestCase): def setUp(self): + super(ZonesTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -81,6 +82,7 @@ class ZonesTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(ZonesTest, self).tearDown() def test_get_zone_list(self): req = webob.Request.blank('/v1.0/zones') @@ -134,7 +136,3 @@ class ZonesTest(unittest.TestCase): self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') self.assertFalse('username' in res_dict['zone']) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 44e2d615c..2c7852214 100644 --- 
a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -21,7 +21,7 @@ Test WSGI basics and provide some helper functions for other WSGI tests. """ -import unittest +from nova import test import routes import webob @@ -29,7 +29,7 @@ import webob from nova import wsgi -class Test(unittest.TestCase): +class Test(test.TestCase): def test_debug(self): @@ -92,7 +92,7 @@ class Test(unittest.TestCase): self.assertNotEqual(result.body, "123") -class SerializerTest(unittest.TestCase): +class SerializerTest(test.TestCase): def match(self, url, accept, expect): input_dict = dict(servers=dict(a=(2, 3))) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index da86e6e11..5a1be08eb 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -311,4 +311,5 @@ class S3APITestCase(test.TestCase): self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') stop_listening = defer.maybeDeferred(self.listening_port.stopListening) + super(S3APITestCase, self).tearDown() return defer.DeferredList([stop_listening]) diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 7656f5396..b6bfab534 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -52,6 +52,7 @@ class DirectTestCase(test.TestCase): def tearDown(self): direct.ROUTES = {} + super(DirectTestCase, self).tearDown() def test_delegated_auth(self): req = webob.Request.blank('/fake/context') diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 9d458244b..1bad364e5 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -150,6 +150,7 @@ class SimpleDriverTestCase(test.TestCase): def tearDown(self): self.manager.delete_user(self.user) self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() def _create_instance(self, **kwargs): """Create a test instance""" diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 
6e5a0114b..7aadd65d5 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -206,9 +206,9 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) def tearDown(self): - super(LibvirtConnTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + super(LibvirtConnTestCase, self).tearDown() class IptablesFirewallTestCase(test.TestCase): @@ -388,6 +388,7 @@ class NWFilterTestCase(test.TestCase): def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) + super(NWFilterTestCase, self).tearDown() def test_cidr_rule_nwfilter_xml(self): cloud_controller = cloud.CloudController() -- cgit From 3b2a8b516fd9dbd08563c709e14323d571b8efee Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 01:52:07 -0800 Subject: speed up network tests --- nova/tests/__init__.py | 3 ++- nova/tests/fake_flags.py | 4 ++-- nova/tests/test_network.py | 8 +++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 5472bdaf2..5afd9389d 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -48,7 +48,8 @@ def setup(): ctxt = context.get_admin_context() network_manager.VlanManager().create_networks(ctxt, FLAGS.fixed_range, - 5, 16, + FLAGS.num_networks, + FLAGS.network_size, FLAGS.fixed_range_v6, FLAGS.vlan_start, FLAGS.vpn_start, diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 575fefff6..a8291a968 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -29,8 +29,8 @@ FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.network_size = 16 -FLAGS.num_networks = 5 +FLAGS.network_size = 8 +FLAGS.num_networks = 2 FLAGS.fake_network = True flags.DECLARE('num_shelves', 'nova.volume.driver') 
flags.DECLARE('blades_per_shelf', 'nova.volume.driver') diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 00f9323f3..ccb5298bd 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -42,15 +42,13 @@ class NetworkTestCase(test.TestCase): # flags in the corresponding section in nova-dhcpbridge self.flags(connection_type='fake', fake_call=True, - fake_network=True, - network_size=16, - num_networks=5) + fake_network=True) self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.network = utils.import_object(FLAGS.network_manager) self.context = context.RequestContext(project=None, user=self.user) - for i in range(5): + for i in range(FLAGS.num_networks): name = 'project%s' % i project = self.manager.create_project(name, 'netuser', name) self.projects.append(project) @@ -192,7 +190,7 @@ class NetworkTestCase(test.TestCase): first = self._create_address(0) lease_ip(first) instance_ids = [] - for i in range(1, 5): + for i in range(1, FLAGS.num_networks): instance_ref = self._create_instance(i, mac=utils.generate_mac()) instance_ids.append(instance_ref['id']) address = self._create_address(i, instance_ref['id']) -- cgit From a9075d4edc126b95910258face7f00073449073d Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 23 Feb 2011 10:27:30 +0000 Subject: FlatManager.init_host now inhibits call to method in superclass. Floating IP methods have been redefined in FlatManager to raise NotImplementedError --- nova/network/manager.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index c6eba225e..a7f263daa 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -322,6 +322,17 @@ class FlatManager(NetworkManager): """ timeout_fixed_ips = False + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. 
+ """ + #Fix for bug 723298 - do not call init_host on superclass + #Following code has been copied for NetworkManager.init_host + ctxt = context.get_admin_context() + for network in self.db.host_get_networks(ctxt, self.host): + self._on_set_network_host(ctxt, network['id']) + + def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool.""" # TODO(vish): when this is called by compute, we can associate compute @@ -406,6 +417,22 @@ class FlatManager(NetworkManager): net['dns'] = FLAGS.flat_network_dns self.db.network_update(context, network_id, net) + def allocate_floating_ip(self, context, project_id): + #Fix for bug 723298 + raise NotImplementedError() + + def associate_floating_ip(self, context, floating_address, fixed_address): + #Fix for bug 723298 + raise NotImplementedError() + + def disassociate_floating_ip(self, context, floating_address): + #Fix for bug 723298 + raise NotImplementedError() + + def deallocate_floating_ip(self, context, floating_address): + #Fix for bug 723298 + raise NotImplementedError() + class FlatDHCPManager(FlatManager): """Flat networking with dhcp. 
-- cgit From 79a4c527fbb75bc563721fa23be4ea4aa97b39ee Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 23 Feb 2011 11:49:47 +0000 Subject: Fixed pep8 errors --- nova/network/manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index a7f263daa..1df193be0 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -332,7 +332,6 @@ class FlatManager(NetworkManager): for network in self.db.host_get_networks(ctxt, self.host): self._on_set_network_host(ctxt, network['id']) - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool.""" # TODO(vish): when this is called by compute, we can associate compute -- cgit From d160455b77d7e180f252f4b412e3f65d7286b51f Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 23 Feb 2011 08:45:27 -0800 Subject: allow users to omit 'nova.tests' with run_tests --- run_tests.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/run_tests.py b/run_tests.py index 6d96454b9..47e3ee317 100644 --- a/run_tests.py +++ b/run_tests.py @@ -17,6 +17,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Unittest runner for Nova. 
+ +To run all tests + python run_tests.py + +To run a single test: + python run_tests.py test_compute:ComputeTestCase.test_run_terminate + +To run a single test module: + python run_tests.py test_compute + + or + + python run_tests.py api.test_wsgi + +""" + import gettext import os import unittest @@ -62,6 +79,15 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': logging.setup() + # If any argument looks like a test name but doesn't have "nova.tests" in + # front of it, automatically add that so we don't have to type as much + argv = [] + for x in sys.argv: + if x.startswith('test_'): + argv.append('nova.tests.%s' % x) + else: + argv.append(x) + c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, @@ -70,4 +96,4 @@ if __name__ == '__main__': runner = NovaTestRunner(stream=c.stream, verbosity=c.verbosity, config=c) - sys.exit(not core.run(config=c, testRunner=runner)) + sys.exit(not core.run(config=c, testRunner=runner, argv=argv)) -- cgit From 2bbbfc5af62db57158a8d6aa26912ba234d0296e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 23 Feb 2011 08:46:11 -0800 Subject: dump error output directly on short import errors --- run_tests.sh | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index 70212cc6a..4e8159e7b 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -40,7 +40,18 @@ done function run_tests { # Just run the test suites in current environment ${wrapper} rm -f nova.sqlite - ${wrapper} $NOSETESTS + ${wrapper} $NOSETESTS 2> run_tests.err.log + # If we get some short import error right away, print the error log directly + RESULT=$? 
+ if [ "$RESULT" -ne "0" ]; + then + ERRSIZE=`wc -l run_tests.err.log | awk '{print \$1}'` + if [ "$ERRSIZE" -lt "40" ]; + then + cat run_tests.err.log + fi + fi + return $RESULT } NOSETESTS="python run_tests.py $noseargs" -- cgit From ef0dfb6809f31cfe8ca8056892fc9dcc2f00a0d7 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 09:40:43 -0800 Subject: Changed unit test to refer to compute API, per Todd's suggestion. Avoids needing to extend our implementation of the EC2 API. --- nova/api/ec2/cloud.py | 6 +----- nova/tests/test_quota.py | 3 ++- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 5db865b02..882cdcfc9 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -783,9 +783,6 @@ class CloudController(object): def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) - # NOTE(justinsb): the EC2 API doesn't support metadata here, but this - # is needed for the unit tests. Maybe the unit tests shouldn't be - # calling the EC2 code instances = self.compute_api.create(context, instance_type=instance_types.get_by_type( kwargs.get('instance_type', None)), @@ -800,8 +797,7 @@ class CloudController(object): user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( - 'AvailabilityZone'), - metadata=kwargs.get('metadata', [])) + 'AvailabilityZone')) return self._format_run_instances(context, instances[0]['reservation_id']) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 36ccc273e..1e42fddf3 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import compute from nova import context from nova import db from nova import flags @@ -168,7 +169,7 @@ class QuotaTestCase(test.TestCase): metadata = {} for i in range(FLAGS.quota_metadata_items + 1): metadata['key%s' % i] = 'value%s' % i - self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.assertRaises(quota.QuotaError, compute.API().create, self.context, min_count=1, max_count=1, -- cgit From 3c09d486f862de7069b848e8124787cfbf4247f8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 11:20:52 -0800 Subject: use flags for sqlite db names and fix flags in dhcpbridge --- bin/nova-dhcpbridge | 13 +------------ nova/flags.py | 3 ++- nova/test.py | 11 +++++++---- nova/tests/__init__.py | 7 ++++++- nova/tests/fake_flags.py | 2 +- run_tests.py | 11 ++++++++--- 6 files changed, 25 insertions(+), 22 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 04a1771f0..3dd9de367 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -105,18 +105,7 @@ def main(): logging.setup() interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') if int(os.environ.get('TESTING', '0')): - FLAGS.fake_rabbit = True - FLAGS.network_size = 16 - FLAGS.connection_type = 'fake' - FLAGS.fake_network = True - FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' - FLAGS.num_networks = 5 - path = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - 'nova', - 'tests', - 'tests.sqlite')) - FLAGS.sql_connection = 'sqlite:///%s' % path + from nova.tests import fake_flags action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/nova/flags.py b/nova/flags.py index f64a62da9..ab1adc6e3 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -285,8 +285,9 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), DEFINE_string('logdir', None, 'output to a per-service log file in named ' 'directory') +DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite') 
DEFINE_string('sql_connection', - 'sqlite:///$state_path/nova.sqlite', + 'sqlite:///$state_path/$sqlite_db', 'connection string for sql database') DEFINE_integer('sql_idle_timeout', 3600, diff --git a/nova/test.py b/nova/test.py index 6cbbb9e8e..9c961a987 100644 --- a/nova/test.py +++ b/nova/test.py @@ -22,12 +22,14 @@ Allows overriding of flags for use of fakes, and some black magic for inline callbacks. """ + import datetime +import os +import shutil import uuid import unittest import mox -import shutil import stubout from nova import context @@ -39,8 +41,8 @@ from nova import service FLAGS = flags.FLAGS -flags.DEFINE_bool('flush_db', True, - 'Flush the database before running fake tests') +flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite', + 'File name of clean sqlite db') flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') @@ -65,7 +67,8 @@ class TestCase(unittest.TestCase): # now that we have some required db setup for the system # to work properly. self.start = datetime.datetime.utcnow() - shutil.copyfile("clean.sqlite", "tests.sqlite") + shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), + os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 5afd9389d..dbd433054 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -37,13 +37,17 @@ setattr(__builtin__, '_', lambda x: x) def setup(): + import os import shutil + from nova import context from nova import flags from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags + FLAGS = flags.FLAGS + migration.db_sync() ctxt = context.get_admin_context() network_manager.VlanManager().create_networks(ctxt, @@ -54,4 +58,5 @@ def setup(): FLAGS.vlan_start, FLAGS.vpn_start, ) - shutil.copyfile("tests.sqlite", "clean.sqlite") + 
shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_db), + os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index a8291a968..dcc8a676d 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -39,6 +39,6 @@ FLAGS.num_shelves = 2 FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True -FLAGS.sql_connection = 'sqlite:///tests.sqlite' +FLAGS.sqlite_db = "tests.sqlite" FLAGS.use_ipv6 = True FLAGS.logfile = 'tests.log' diff --git a/run_tests.py b/run_tests.py index 01a1cf8bc..88c42bd31 100644 --- a/run_tests.py +++ b/run_tests.py @@ -46,13 +46,17 @@ import unittest import sys from nose import config -from nose import result from nose import core +from nose import result +from nova import flags from nova import log as logging from nova.tests import fake_flags +FLAGS = flags.FLAGS + + class _AnsiColorizer(object): """ A colorizer is an object that loosely wraps around a stream, allowing @@ -259,10 +263,11 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': logging.setup() - testdir = os.path.abspath(os.path.join("nova","tests")) - testdb = os.path.join(testdir, "tests.sqlite") + testdb = os.path.join(FLAGS.state_path, + FLAGS.sqlite_db) if os.path.exists(testdb): os.unlink(testdb) + testdir = os.path.abspath(os.path.join("nova","tests")) c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, -- cgit From 48d4054e093a2faccbd819de8e9e02c03d28cda0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 11:26:38 -0800 Subject: fix for failing describe_instances test --- nova/api/ec2/cloud.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 1b96567eb..e219fb30c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -675,7 +675,8 @@ class CloudController(object): instances = [] for ec2_id in instance_id: internal_id = 
ec2_id_to_id(ec2_id) - instance = self.compute_api.get(context, internal_id) + instance = self.compute_api.get(context, + instance_id=internal_id) instances.append(instance) else: instances = self.compute_api.get_all(context, **kwargs) -- cgit From 1b2d67e769ff1a6fe68a933e8b966d72588ce8ac Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 11:44:09 -0800 Subject: merged trunk --- nova/test.py | 18 ++++-------------- nova/tests/__init__.py | 13 +++++++++++++ nova/tests/api/openstack/__init__.py | 4 ++-- nova/tests/api/openstack/test_adminapi.py | 8 ++++---- nova/tests/api/openstack/test_api.py | 4 ++-- nova/tests/api/openstack/test_auth.py | 14 +++++++------- nova/tests/api/openstack/test_common.py | 5 +++-- nova/tests/api/openstack/test_faults.py | 4 ++-- nova/tests/api/openstack/test_flavors.py | 10 ++++------ nova/tests/api/openstack/test_images.py | 14 ++++++++++---- nova/tests/api/openstack/test_ratelimiting.py | 15 +++++++-------- nova/tests/api/openstack/test_servers.py | 10 ++++------ nova/tests/api/openstack/test_shared_ip_groups.py | 7 ++++--- nova/tests/api/openstack/test_zones.py | 10 ++++------ nova/tests/api/test_wsgi.py | 6 +++--- nova/tests/objectstore_unittest.py | 1 + nova/tests/test_direct.py | 1 + nova/tests/test_scheduler.py | 1 + nova/tests/test_virt.py | 3 ++- 19 files changed, 78 insertions(+), 70 deletions(-) diff --git a/nova/test.py b/nova/test.py index bff43b6c7..826a4bd11 100644 --- a/nova/test.py +++ b/nova/test.py @@ -26,15 +26,14 @@ import datetime import unittest import mox +import shutil import stubout from nova import context from nova import db from nova import fakerabbit from nova import flags -from nova import log as logging from nova import rpc -from nova.network import manager as network_manager FLAGS = flags.FLAGS @@ -65,14 +64,8 @@ class TestCase(unittest.TestCase): # to work properly. 
self.start = datetime.datetime.utcnow() ctxt = context.get_admin_context() - if db.network_count(ctxt) != 5: - network_manager.VlanManager().create_networks(ctxt, - FLAGS.fixed_range, - 5, 16, - FLAGS.fixed_range_v6, - FLAGS.vlan_start, - FLAGS.vpn_start, - ) + shutil.copyfile("tests.sqlite", "clean.sqlite") + assert(db.security_group_get_all(ctxt) == []) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators @@ -86,6 +79,7 @@ class TestCase(unittest.TestCase): def tearDown(self): """Runs after each test method to finalize/tear down test environment.""" + shutil.copyfile("clean.sqlite", "tests.sqlite") try: self.mox.UnsetStubs() self.stubs.UnsetAll() @@ -93,9 +87,6 @@ class TestCase(unittest.TestCase): self.mox.VerifyAll() # NOTE(vish): Clean up any ips associated during the test. ctxt = context.get_admin_context() - db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, - self.start) - db.network_disassociate_all(ctxt) rpc.Consumer.attach_to_eventlet = self.originalAttach for x in self.injected: try: @@ -106,7 +97,6 @@ class TestCase(unittest.TestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() - db.security_group_destroy_all(ctxt) super(TestCase, self).tearDown() finally: self.reset_flags() diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 592d5bea9..d3ab02887 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -38,4 +38,17 @@ setattr(__builtin__, '_', lambda x: x) def setup(): from nova.db import migration + from nova.network import manager as network_manager + from nova import context + from nova import flags + from nova.tests import fake_flags + FLAGS = flags.FLAGS migration.db_sync() + ctxt = context.get_admin_context() + network_manager.VlanManager().create_networks(ctxt, + FLAGS.fixed_range, + 5, 16, + FLAGS.fixed_range_v6, + FLAGS.vlan_start, + FLAGS.vpn_start, + ) diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 
77b1dd37f..e18120285 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -16,7 +16,7 @@ # under the License. import webob.dec -import unittest +from nova import test from nova import context from nova import flags @@ -33,7 +33,7 @@ def simple_wsgi(req): return "" -class RateLimitingMiddlewareTest(unittest.TestCase): +class RateLimitingMiddlewareTest(test.TestCase): def test_get_action_name(self): middleware = RateLimitingMiddleware(simple_wsgi) diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py index 73120c31d..125fbe973 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -15,13 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import stubout import webob from paste import urlmap from nova import flags +from nova import test from nova.api import openstack from nova.api.openstack import ratelimiting from nova.api.openstack import auth @@ -30,9 +30,10 @@ from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS -class AdminAPITest(unittest.TestCase): +class AdminAPITest(test.TestCase): def setUp(self): + super(AdminAPITest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -44,6 +45,7 @@ class AdminAPITest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(AdminAPITest, self).tearDown() def test_admin_enabled(self): FLAGS.allow_admin_api = True @@ -61,5 +63,3 @@ class AdminAPITest(unittest.TestCase): self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are unavailable. 
-if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index db0fe1060..5112c486f 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -15,17 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import webob.exc import webob.dec from webob import Request +from nova import test from nova.api import openstack from nova.api.openstack import faults -class APITest(unittest.TestCase): +class APITest(test.TestCase): def _wsgi_app(self, inner_app): # simpler version of the app than fakes.wsgi_app diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 0dd65d321..13f6c3a1c 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -16,7 +16,6 @@ # under the License. import datetime -import unittest import stubout import webob @@ -27,12 +26,14 @@ import nova.api.openstack.auth import nova.auth.manager from nova import auth from nova import context +from nova import test from nova.tests.api.openstack import fakes -class Test(unittest.TestCase): +class Test(test.TestCase): def setUp(self): + super(Test, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) @@ -45,6 +46,7 @@ class Test(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() fakes.fake_data_store = {} + super(Test, self).tearDown() def test_authorize_user(self): f = fakes.FakeAuthManager() @@ -128,8 +130,9 @@ class Test(unittest.TestCase): self.assertEqual(result.status, '401 Unauthorized') -class TestLimiter(unittest.TestCase): +class TestLimiter(test.TestCase): def setUp(self): + super(TestLimiter, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) @@ -141,6 
+144,7 @@ class TestLimiter(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() fakes.fake_data_store = {} + super(TestLimiter, self).tearDown() def test_authorize_token(self): f = fakes.FakeAuthManager() @@ -161,7 +165,3 @@ class TestLimiter(unittest.TestCase): result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 9d9837cc9..59d850157 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -19,14 +19,14 @@ Test suites for 'common' code used throughout the OpenStack HTTP API. """ -import unittest from webob import Request +from nova import test from nova.api.openstack.common import limited -class LimiterTest(unittest.TestCase): +class LimiterTest(test.TestCase): """ Unit tests for the `nova.api.openstack.common.limited` method which takes in a list of items and, depending on the 'offset' and 'limit' GET params, @@ -37,6 +37,7 @@ class LimiterTest(unittest.TestCase): """ Run before each test. """ + super(LimiterTest, self).setUp() self.tiny = range(1) self.small = range(10) self.medium = range(1000) diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py index fda2b5ede..7667753f4 100644 --- a/nova/tests/api/openstack/test_faults.py +++ b/nova/tests/api/openstack/test_faults.py @@ -15,15 +15,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-import unittest import webob import webob.dec import webob.exc +from nova import test from nova.api.openstack import faults -class TestFaults(unittest.TestCase): +class TestFaults(test.TestCase): def test_fault_parts(self): req = webob.Request.blank('/.xml') diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 1bdaea161..761265965 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -15,18 +15,18 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest - import stubout import webob +from nova import test import nova.api from nova.api.openstack import flavors from nova.tests.api.openstack import fakes -class FlavorsTest(unittest.TestCase): +class FlavorsTest(test.TestCase): def setUp(self): + super(FlavorsTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -36,6 +36,7 @@ class FlavorsTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() + super(FlavorsTest, self).tearDown() def test_get_flavor_list(self): req = webob.Request.blank('/v1.0/flavors') @@ -43,6 +44,3 @@ class FlavorsTest(unittest.TestCase): def test_get_flavor_by_id(self): pass - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 8ab4d7569..e232bc3d5 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import json import datetime -import unittest import stubout import webob @@ -30,6 +29,7 @@ import webob from nova import context from nova import exception from nova import flags +from nova import test from nova import utils import nova.api.openstack from nova.api.openstack import images @@ -130,12 +130,13 @@ class BaseImageServiceTests(object): self.assertEquals(1, num_images) 
-class LocalImageServiceTest(unittest.TestCase, +class LocalImageServiceTest(test.TestCase, BaseImageServiceTests): """Tests the local image service""" def setUp(self): + super(LocalImageServiceTest, self).setUp() self.stubs = stubout.StubOutForTesting() service_class = 'nova.image.local.LocalImageService' self.service = utils.import_object(service_class) @@ -145,14 +146,16 @@ class LocalImageServiceTest(unittest.TestCase, self.service.delete_all() self.service.delete_imagedir() self.stubs.UnsetAll() + super(LocalImageServiceTest, self).tearDown() -class GlanceImageServiceTest(unittest.TestCase, +class GlanceImageServiceTest(test.TestCase, BaseImageServiceTests): """Tests the local image service""" def setUp(self): + super(GlanceImageServiceTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.stub_out_glance(self.stubs) fakes.stub_out_compute_api_snapshot(self.stubs) @@ -163,9 +166,10 @@ class GlanceImageServiceTest(unittest.TestCase, def tearDown(self): self.stubs.UnsetAll() + super(GlanceImageServiceTest, self).tearDown() -class ImageControllerWithGlanceServiceTest(unittest.TestCase): +class ImageControllerWithGlanceServiceTest(test.TestCase): """Test of the OpenStack API /images application controller""" @@ -194,6 +198,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): 'image_type': 'ramdisk'}] def setUp(self): + super(ImageControllerWithGlanceServiceTest, self).setUp() self.orig_image_service = FLAGS.image_service FLAGS.image_service = 'nova.image.glance.GlanceImageService' self.stubs = stubout.StubOutForTesting() @@ -208,6 +213,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.image_service = self.orig_image_service + super(ImageControllerWithGlanceServiceTest, self).tearDown() def test_get_image_index(self): req = webob.Request.blank('/v1.0/images') diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py index 
4c9d6bc23..9ae90ee20 100644 --- a/nova/tests/api/openstack/test_ratelimiting.py +++ b/nova/tests/api/openstack/test_ratelimiting.py @@ -1,15 +1,16 @@ import httplib import StringIO import time -import unittest import webob +from nova import test import nova.api.openstack.ratelimiting as ratelimiting -class LimiterTest(unittest.TestCase): +class LimiterTest(test.TestCase): def setUp(self): + super(LimiterTest, self).setUp() self.limits = { 'a': (5, ratelimiting.PER_SECOND), 'b': (5, ratelimiting.PER_MINUTE), @@ -83,9 +84,10 @@ class FakeLimiter(object): return self._delay -class WSGIAppTest(unittest.TestCase): +class WSGIAppTest(test.TestCase): def setUp(self): + super(WSGIAppTest, self).setUp() self.limiter = FakeLimiter(self) self.app = ratelimiting.WSGIApp(self.limiter) @@ -206,7 +208,7 @@ def wire_HTTPConnection_to_WSGI(host, app): httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection) -class WSGIAppProxyTest(unittest.TestCase): +class WSGIAppProxyTest(test.TestCase): def setUp(self): """Our WSGIAppProxy is going to call across an HTTPConnection to a @@ -218,6 +220,7 @@ class WSGIAppProxyTest(unittest.TestCase): at the WSGIApp. And the limiter isn't real -- it's a fake that behaves the way we tell it to. 
""" + super(WSGIAppProxyTest, self).setUp() self.limiter = FakeLimiter(self) app = ratelimiting.WSGIApp(self.limiter) wire_HTTPConnection_to_WSGI('100.100.100.100:80', app) @@ -238,7 +241,3 @@ class WSGIAppProxyTest(unittest.TestCase): self.limiter.mock('murder', 'brutus', None) self.proxy.perform('stab', 'brutus') self.assertRaises(AssertionError, shouldRaise) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a7be0796e..ea29dcf9b 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -17,13 +17,13 @@ import datetime import json -import unittest import stubout import webob from nova import db from nova import flags +from nova import test import nova.api.openstack from nova.api.openstack import servers import nova.db.api @@ -108,9 +108,10 @@ def fake_compute_api(cls, req, id): return True -class ServersTest(unittest.TestCase): +class ServersTest(test.TestCase): def setUp(self): + super(ServersTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -141,6 +142,7 @@ class ServersTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(ServersTest, self).tearDown() def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') @@ -410,7 +412,3 @@ class ServersTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py index c2fc3a203..b4de2ef41 100644 --- a/nova/tests/api/openstack/test_shared_ip_groups.py +++ b/nova/tests/api/openstack/test_shared_ip_groups.py @@ -15,19 +15,20 @@ # License for the specific 
language governing permissions and limitations # under the License. -import unittest - import stubout +from nova import test from nova.api.openstack import shared_ip_groups -class SharedIpGroupsTest(unittest.TestCase): +class SharedIpGroupsTest(test.TestCase): def setUp(self): + super(SharedIpGroupsTest, self).setUp() self.stubs = stubout.StubOutForTesting() def tearDown(self): self.stubs.UnsetAll() + super(SharedIpGroupsTest, self).tearDown() def test_get_shared_ip_groups(self): pass diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index df497ef1b..555b206b9 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest import stubout import webob @@ -22,6 +21,7 @@ import json import nova.db from nova import context from nova import flags +from nova import test from nova.api.openstack import zones from nova.tests.api.openstack import fakes @@ -60,8 +60,9 @@ def zone_get_all(context): password='qwerty')] -class ZonesTest(unittest.TestCase): +class ZonesTest(test.TestCase): def setUp(self): + super(ZonesTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} @@ -81,6 +82,7 @@ class ZonesTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + super(ZonesTest, self).tearDown() def test_get_zone_list(self): req = webob.Request.blank('/v1.0/zones') @@ -134,7 +136,3 @@ class ZonesTest(unittest.TestCase): self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://foo.com') self.assertFalse('username' in res_dict['zone']) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 44e2d615c..2c7852214 100644 --- 
a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -21,7 +21,7 @@ Test WSGI basics and provide some helper functions for other WSGI tests. """ -import unittest +from nova import test import routes import webob @@ -29,7 +29,7 @@ import webob from nova import wsgi -class Test(unittest.TestCase): +class Test(test.TestCase): def test_debug(self): @@ -92,7 +92,7 @@ class Test(unittest.TestCase): self.assertNotEqual(result.body, "123") -class SerializerTest(unittest.TestCase): +class SerializerTest(test.TestCase): def match(self, url, accept, expect): input_dict = dict(servers=dict(a=(2, 3))) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index da86e6e11..5a1be08eb 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -311,4 +311,5 @@ class S3APITestCase(test.TestCase): self.auth_manager.delete_user('admin') self.auth_manager.delete_project('admin') stop_listening = defer.maybeDeferred(self.listening_port.stopListening) + super(S3APITestCase, self).tearDown() return defer.DeferredList([stop_listening]) diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 7656f5396..b6bfab534 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -52,6 +52,7 @@ class DirectTestCase(test.TestCase): def tearDown(self): direct.ROUTES = {} + super(DirectTestCase, self).tearDown() def test_delegated_auth(self): req = webob.Request.blank('/fake/context') diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 9d458244b..1bad364e5 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -150,6 +150,7 @@ class SimpleDriverTestCase(test.TestCase): def tearDown(self): self.manager.delete_user(self.user) self.manager.delete_project(self.project) + super(SimpleDriverTestCase, self).tearDown() def _create_instance(self, **kwargs): """Create a test instance""" diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 
6e5a0114b..7aadd65d5 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -206,9 +206,9 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) def tearDown(self): - super(LibvirtConnTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + super(LibvirtConnTestCase, self).tearDown() class IptablesFirewallTestCase(test.TestCase): @@ -388,6 +388,7 @@ class NWFilterTestCase(test.TestCase): def tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) + super(NWFilterTestCase, self).tearDown() def test_cidr_rule_nwfilter_xml(self): cloud_controller = cloud.CloudController() -- cgit From b09534dac05a3b4c127c633d8c050bb310a27166 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 11:52:10 -0800 Subject: put the redirection back in to run_tests.sh and fix terminal colors by using original stdout --- nova/tests/fake_flags.py | 1 - run_tests.py | 16 +++++++++------- run_tests.sh | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 575fefff6..2b1919407 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -41,4 +41,3 @@ FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///tests.sqlite' FLAGS.use_ipv6 = True -FLAGS.logfile = 'tests.log' diff --git a/run_tests.py b/run_tests.py index 01a1cf8bc..c78f88831 100644 --- a/run_tests.py +++ b/run_tests.py @@ -82,7 +82,7 @@ class _AnsiColorizer(object): try: return curses.tigetnum("colors") > 2 except curses.error: - curses.setupterm(fd=stream.fileno()) + curses.setupterm() return curses.tigetnum("colors") > 2 except: raise @@ -107,13 +107,13 @@ class _Win32Colorizer(object): See _AnsiColorizer docstring. 
""" def __init__(self, stream): - from win32console import GetStdHandle, STD_ERROR_HANDLE, \ + from win32console import GetStdHandle, STD_OUT_HANDLE, \ FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \ FOREGROUND_INTENSITY red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN, FOREGROUND_BLUE, FOREGROUND_INTENSITY) self.stream = stream - self.screenBuffer = GetStdHandle(STD_ERROR_HANDLE) + self.screenBuffer = GetStdHandle(STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, @@ -129,7 +129,7 @@ class _Win32Colorizer(object): try: import win32console screenBuffer = win32console.GetStdHandle( - win32console.STD_ERROR_HANDLE) + win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes @@ -170,12 +170,14 @@ class NovaTestResult(result.TextTestResult): result.TextTestResult.__init__(self, *args, **kw) self._last_case = None self.colorizer = None + # NOTE(vish): reset stdout for the terminal check + stdout = sys.stdout + sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: - # NOTE(vish): nose does funky stuff with stdout, so use stderr - # to setup the colorizer - if colorizer.supported(sys.stderr): + if colorizer.supported(): self.colorizer = colorizer(self.stream) break + sys.stdout = stdout def getDescription(self, test): return str(test) diff --git a/run_tests.sh b/run_tests.sh index e8433bc06..ebe236baf 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -39,7 +39,7 @@ done function run_tests { # Just run the test suites in current environment - ${wrapper} $NOSETESTS + ${wrapper} $NOSETESTS 2> run_tests.log } NOSETESTS="python run_tests.py $noseargs" -- cgit From f7751eedc0e895f90d48104e2110bc2b320735fc Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 23 Feb 2011 13:53:02 -0600 Subject: Revert commit 709. This fixes issues with the Openstack API causing 'No user for access key admin' errors. 
--- nova/api/openstack/auth.py | 4 ++-- nova/tests/api/openstack/fakes.py | 8 ++------ nova/tests/api/openstack/test_auth.py | 6 +++--- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index c3fe0cc8c..1dfdd5318 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -120,8 +120,8 @@ class AuthMiddleware(wsgi.Middleware): req - webob.Request object """ ctxt = context.get_admin_context() - user = self.auth.get_user_from_access_key(username) - if user and user.secret == key: + user = self.auth.get_user_from_access_key(key) + if user and user.name == username: token_hash = hashlib.sha1('%s%s%f' % (username, key, time.time())).hexdigest() token_dict = {} diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index e0b7b8029..fb282f1c9 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -221,8 +221,7 @@ class FakeAuthDatabase(object): class FakeAuthManager(object): auth_data = {} - def add_user(self, user): - key = user.id + def add_user(self, key, user): FakeAuthManager.auth_data[key] = user def get_user(self, uid): @@ -235,10 +234,7 @@ class FakeAuthManager(object): return None def get_user_from_access_key(self, key): - for k, v in FakeAuthManager.auth_data.iteritems(): - if v.access == key: - return v - return None + return FakeAuthManager.auth_data.get(key, None) class FakeRateLimiter(object): diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index eab78b50c..0dd65d321 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -48,7 +48,7 @@ class Test(unittest.TestCase): def test_authorize_user(self): f = fakes.FakeAuthManager() - f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 
'herp' @@ -62,7 +62,7 @@ class Test(unittest.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' @@ -144,7 +144,7 @@ class TestLimiter(unittest.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user(nova.auth.manager.User(1, 'herp', 'herp', 'derp', None)) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' -- cgit From 19dc13131b7fe512cb7897a888093b5c9a62e69d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 12:00:02 -0800 Subject: move the deletion of the db into fixtures --- nova/tests/__init__.py | 8 ++++++-- run_tests.py | 4 ---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index dbd433054..7fba02a93 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -48,6 +48,9 @@ def setup(): FLAGS = flags.FLAGS + testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) + if os.path.exists(testdb): + os.unlink(testdb) migration.db_sync() ctxt = context.get_admin_context() network_manager.VlanManager().create_networks(ctxt, @@ -58,5 +61,6 @@ def setup(): FLAGS.vlan_start, FLAGS.vpn_start, ) - shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_db), - os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)) + + cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) + shutil.copyfile(testdb, cleandb) diff --git a/run_tests.py b/run_tests.py index bb6f0dd1e..5fb5ba32f 100644 --- a/run_tests.py +++ b/run_tests.py @@ -265,10 +265,6 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': logging.setup() - testdb = os.path.join(FLAGS.state_path, - FLAGS.sqlite_db) - if 
os.path.exists(testdb): - os.unlink(testdb) testdir = os.path.abspath(os.path.join("nova","tests")) c = config.Config(stream=sys.stdout, env=os.environ, -- cgit From 5283e1c131a21ea4963c702a7137536f7b894bb6 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 12:05:49 -0800 Subject: Created mini XPath implementation, to simplify mapping logic --- nova/api/openstack/servers.py | 21 ++----- nova/tests/test_minixpath.py | 141 ++++++++++++++++++++++++++++++++++++++++++ nova/utils.py | 45 ++++++++++++++ 3 files changed, 191 insertions(+), 16 deletions(-) create mode 100644 nova/tests/test_minixpath.py diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index b54e28c0c..794705306 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -30,7 +30,7 @@ from nova.auth import manager as auth_manager from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack - +import types LOG = logging.getLogger('server') LOG.setLevel(logging.DEBUG) @@ -63,22 +63,11 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], private=[]) - fixed_ip = inst['fixed_ip'] - if fixed_ip: - # grab single private fixed ip - try: - private_ip = fixed_ip['address'] - if private_ip: - inst_dict['addresses']['private'].append(private_ip) - except KeyError: - LOG.debug(_("Failed to read private ip")) + private_ips = utils.minixpath_select(inst, 'fixed_ip/address') + inst_dict['addresses']['private'] = private_ips - # grab all public floating ips - try: - for floating in fixed_ip['floating_ips']: - inst_dict['addresses']['public'].append(floating['address']) - except KeyError: - LOG.debug(_("Failed to read public ip(s)")) + public_ips = utils.minixpath_select(inst, 'fixed_ip/floating_ips/address') + inst_dict['addresses']['public'] = public_ips inst_dict['metadata'] = {} inst_dict['hostId'] = '' diff --git 
a/nova/tests/test_minixpath.py b/nova/tests/test_minixpath.py new file mode 100644 index 000000000..7fddcf9e9 --- /dev/null +++ b/nova/tests/test_minixpath.py @@ -0,0 +1,141 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import test +from nova import utils +from nova import exception + + +class MiniXPathTestCase(test.TestCase): + def test_tolerates_nones(self): + xp = utils.minixpath_select + + input = [] + self.assertEquals([], xp(input, "a")) + self.assertEquals([], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [None] + self.assertEquals([], xp(input, "a")) + self.assertEquals([], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': None}] + self.assertEquals([], xp(input, "a")) + self.assertEquals([], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': {'b': None}}] + self.assertEquals([{'b': None}], xp(input, "a")) + self.assertEquals([], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}] + self.assertEquals([{'b': {'c': None}}], xp(input, "a")) + self.assertEquals([{'c': None}], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': None}] + self.assertEquals([{'b': {'c': None}}], xp(input, "a")) + self.assertEquals([{'c': None}], xp(input, "a/b")) + self.assertEquals([], 
xp(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] + self.assertEquals([{'b': {'c': None}}, {'b': None}], xp(input, "a")) + self.assertEquals([{'c': None}], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + def test_does_select(self): + xp = utils.minixpath_select + + input = [{'a': 'a_1'}] + self.assertEquals(['a_1'], xp(input, "a")) + self.assertEquals([], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': {'b': 'b_1'}}] + self.assertEquals([{'b': 'b_1'}], xp(input, "a")) + self.assertEquals(['b_1'], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}], xp(input, "a")) + self.assertEquals([{'c': 'c_1'}], xp(input, "a/b")) + self.assertEquals(['c_1'], xp(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] + self.assertEquals([{'b': {'c': 'c_1'}}], + xp(input, "a")) + self.assertEquals([{'c': 'c_1'}], xp(input, "a/b")) + self.assertEquals(['c_1'], xp(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': None}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], + xp(input, "a")) + self.assertEquals([{'c': 'c_1'}], xp(input, "a/b")) + self.assertEquals(['c_1'], xp(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': {'c': 'c_2'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], + xp(input, "a")) + self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], + xp(input, "a/b")) + self.assertEquals(['c_1', 'c_2'], xp(input, "a/b/c")) + + self.assertEquals([], xp(input, "a/b/c/d")) + self.assertEquals([], xp(input, "c/a/b/d")) + self.assertEquals([], xp(input, "i/r/t")) + + def test_flattens_lists(self): + xp = utils.minixpath_select + + input = [{'a': [1, 2, 3]}] + self.assertEquals([1, 2, 3], xp(input, "a")) + self.assertEquals([], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}] + 
self.assertEquals([{'b': [1, 2, 3]}], xp(input, "a")) + self.assertEquals([1, 2, 3], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] + self.assertEquals([1, 2, 3, 4, 5, 6], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] + self.assertEquals([1, 2, 3, 4, 5, 6], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = [{'a': [1, 2, {'b': 'b_1'}]}] + self.assertEquals([1, 2, {'b': 'b_1'}], xp(input, "a")) + self.assertEquals(['b_1'], xp(input, "a/b")) + + def test_bad_xpath(self): + xp = utils.minixpath_select + + self.assertRaises(exception.Error, xp, [], None) + self.assertRaises(exception.Error, xp, [], "") + self.assertRaises(exception.Error, xp, [], "/") + self.assertRaises(exception.Error, xp, [], "/a") + self.assertRaises(exception.Error, xp, [], "/a/") + self.assertRaises(exception.Error, xp, [], "//") + self.assertRaises(exception.Error, xp, [], "//a") + self.assertRaises(exception.Error, xp, [], "a//a") + self.assertRaises(exception.Error, xp, [], "a//a/") + self.assertRaises(exception.Error, xp, [], "a/a/") diff --git a/nova/utils.py b/nova/utils.py index 42efa0008..2f926bd82 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -2,6 +2,7 @@ # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -34,6 +35,7 @@ import time from xml.sax import saxutils import re import netaddr +import types from eventlet import event from eventlet import greenthread @@ -499,3 +501,46 @@ def ensure_b64_encoding(val): return val except TypeError: return base64.b64encode(val) + + +def minixpath_select(items, minixpath): + """ Takes an xpath-like expression e.g. 
prop1/prop2/prop3, and for each + item in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of + the intermediate results are lists it will treat each list item + individually. A 'None' in items or any child expressions will be ignored, + this function will not throw because of None (anywhere) in items""" + + if minixpath is None: + raise exception.Error("Invalid mini_xpath") + + (first_token, sep, remainder) = minixpath.partition("/") + + if first_token == "": + raise exception.Error("Invalid mini_xpath") + + results = [] + + if items is None: + return results + + for item in items: + if item is None: + continue + get_method = getattr(item, "get", None) + if get_method is None: + continue + child = get_method(first_token) + if child is None: + continue + if isinstance(child, types.ListType): + # Flatten intermediate lists + for x in child: + results.append(x) + else: + results.append(child) + + if not sep: + # No more tokens + return results + else: + return minixpath_select(results, remainder) -- cgit From b3b005f50de54b5ef6c62e387dcec5a123f93cf6 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 12:36:09 -0800 Subject: Cope when we pass a non-list to xpath_select - wrap it in a list --- nova/tests/test_minixpath.py | 38 ++++++++++++++++++++++++++++++++++++++ nova/utils.py | 8 +++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_minixpath.py b/nova/tests/test_minixpath.py index 7fddcf9e9..3b1bdf40b 100644 --- a/nova/tests/test_minixpath.py +++ b/nova/tests/test_minixpath.py @@ -139,3 +139,41 @@ class MiniXPathTestCase(test.TestCase): self.assertRaises(exception.Error, xp, [], "a//a") self.assertRaises(exception.Error, xp, [], "a//a/") self.assertRaises(exception.Error, xp, [], "a/a/") + + def test_real_failure1(self): + # Real world failure case... 
+ # We weren't coping when the input was a Dictionary instead of a List + # This led to test_accepts_dictionaries + xp = utils.minixpath_select + + inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], + 'address': '192.168.0.3'}, + 'hostname': ''} + + private_ips = xp(inst, 'fixed_ip/address') + public_ips = xp(inst, 'fixed_ip/floating_ips/address') + self.assertEquals(['192.168.0.3'], private_ips) + self.assertEquals(['1.2.3.4'], public_ips) + + def test_accepts_dictionaries(self): + xp = utils.minixpath_select + + input = {'a': [1, 2, 3]} + self.assertEquals([1, 2, 3], xp(input, "a")) + self.assertEquals([], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = {'a': {'b': [1, 2, 3]}} + self.assertEquals([{'b': [1, 2, 3]}], xp(input, "a")) + self.assertEquals([1, 2, 3], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} + self.assertEquals([1, 2, 3, 4, 5, 6], xp(input, "a/b")) + self.assertEquals([], xp(input, "a/b/c")) + + input = {'a': [1, 2, {'b': 'b_1'}]} + self.assertEquals([1, 2, {'b': 'b_1'}], xp(input, "a")) + self.assertEquals(['b_1'], xp(input, "a/b")) + + diff --git a/nova/utils.py b/nova/utils.py index 2f926bd82..c2cbeb2a7 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -508,7 +508,8 @@ def minixpath_select(items, minixpath): item in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the intermediate results are lists it will treat each list item individually. A 'None' in items or any child expressions will be ignored, - this function will not throw because of None (anywhere) in items""" + this function will not throw because of None (anywhere) in items. 
The + returned list will contain no None values.""" if minixpath is None: raise exception.Error("Invalid mini_xpath") @@ -523,6 +524,10 @@ def minixpath_select(items, minixpath): if items is None: return results + if not isinstance(items, types.ListType): + # Wrap single objects in a list + items = [items] + for item in items: if item is None: continue @@ -532,6 +537,7 @@ def minixpath_select(items, minixpath): child = get_method(first_token) if child is None: continue + #print "%s => %s" % (first_token, child) if isinstance(child, types.ListType): # Flatten intermediate lists for x in child: -- cgit From 21ebea24b4b77f8bd1fd42152454f1b0189843d4 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 12:54:46 -0800 Subject: fix describe_availability_zones --- nova/api/ec2/cloud.py | 5 +++-- nova/db/api.py | 4 ++-- nova/db/sqlalchemy/api.py | 9 +++------ 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 99b6d5cb6..9e8764836 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -198,8 +198,9 @@ class CloudController(object): return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): - enabled_services = db.service_get_all(context) - disabled_services = db.service_get_all(context, True) + ctxt = context.elevated() + enabled_services = db.service_get_all(ctxt) + disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service in enabled_services]: diff --git a/nova/db/api.py b/nova/db/api.py index d7f3746d2..0a010e727 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -85,8 +85,8 @@ def service_get(context, service_id): def service_get_all(context, disabled=False): - """Get all service.""" - return IMPL.service_get_all(context, None, disabled) + """Get all services.""" + return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, 
topic): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index be29fe2a0..d8751bef4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -136,15 +136,12 @@ def service_get(context, service_id, session=None): @require_admin_context -def service_get_all(context, session=None, disabled=False): - if not session: - session = get_session() - - result = session.query(models.Service).\ +def service_get_all(context, disabled=False): + session = get_session() + return session.query(models.Service).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(disabled=disabled).\ all() - return result @require_admin_context -- cgit From 89ade95d2eaabf77f9c81a8d50c7cc11aa175464 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 12:55:07 -0800 Subject: Fix pep8 violation (trailing whitespace) --- nova/tests/test_minixpath.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/test_minixpath.py b/nova/tests/test_minixpath.py index 3b1bdf40b..cc4a35ef3 100644 --- a/nova/tests/test_minixpath.py +++ b/nova/tests/test_minixpath.py @@ -175,5 +175,3 @@ class MiniXPathTestCase(test.TestCase): input = {'a': [1, 2, {'b': 'b_1'}]} self.assertEquals([1, 2, {'b': 'b_1'}], xp(input, "a")) self.assertEquals(['b_1'], xp(input, "a/b")) - - -- cgit From 1183c9e11b12984b1f5007ace831864e80483712 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 14:07:08 -0800 Subject: Rename minixpath_select to get_from_path --- nova/api/openstack/servers.py | 5 +- nova/tests/test_minixpath.py | 177 ------------------------------------------ nova/tests/test_utils.py | 174 +++++++++++++++++++++++++++++++++++++++++ nova/utils.py | 24 +++--- 4 files changed, 188 insertions(+), 192 deletions(-) delete mode 100644 nova/tests/test_minixpath.py create mode 100644 nova/tests/test_utils.py diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 794705306..ce4a6256a 100644 --- 
a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -30,7 +30,6 @@ from nova.auth import manager as auth_manager from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack -import types LOG = logging.getLogger('server') LOG.setLevel(logging.DEBUG) @@ -63,10 +62,10 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], private=[]) - private_ips = utils.minixpath_select(inst, 'fixed_ip/address') + private_ips = utils.get_from_path(inst, 'fixed_ip/address') inst_dict['addresses']['private'] = private_ips - public_ips = utils.minixpath_select(inst, 'fixed_ip/floating_ips/address') + public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address') inst_dict['addresses']['public'] = public_ips inst_dict['metadata'] = {} diff --git a/nova/tests/test_minixpath.py b/nova/tests/test_minixpath.py deleted file mode 100644 index cc4a35ef3..000000000 --- a/nova/tests/test_minixpath.py +++ /dev/null @@ -1,177 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Justin Santa Barbara -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import test -from nova import utils -from nova import exception - - -class MiniXPathTestCase(test.TestCase): - def test_tolerates_nones(self): - xp = utils.minixpath_select - - input = [] - self.assertEquals([], xp(input, "a")) - self.assertEquals([], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [None] - self.assertEquals([], xp(input, "a")) - self.assertEquals([], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': None}] - self.assertEquals([], xp(input, "a")) - self.assertEquals([], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': None}}] - self.assertEquals([{'b': None}], xp(input, "a")) - self.assertEquals([], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': {'c': None}}}] - self.assertEquals([{'b': {'c': None}}], xp(input, "a")) - self.assertEquals([{'c': None}], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': {'c': None}}}, {'a': None}] - self.assertEquals([{'b': {'c': None}}], xp(input, "a")) - self.assertEquals([{'c': None}], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] - self.assertEquals([{'b': {'c': None}}, {'b': None}], xp(input, "a")) - self.assertEquals([{'c': None}], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - def test_does_select(self): - xp = utils.minixpath_select - - input = [{'a': 'a_1'}] - self.assertEquals(['a_1'], xp(input, "a")) - self.assertEquals([], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': 'b_1'}}] - self.assertEquals([{'b': 'b_1'}], xp(input, "a")) - self.assertEquals(['b_1'], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': {'c': 'c_1'}}}] - self.assertEquals([{'b': {'c': 'c_1'}}], xp(input, "a")) - self.assertEquals([{'c': 'c_1'}], xp(input, "a/b")) - 
self.assertEquals(['c_1'], xp(input, "a/b/c")) - - input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] - self.assertEquals([{'b': {'c': 'c_1'}}], - xp(input, "a")) - self.assertEquals([{'c': 'c_1'}], xp(input, "a/b")) - self.assertEquals(['c_1'], xp(input, "a/b/c")) - - input = [{'a': {'b': {'c': 'c_1'}}}, - {'a': {'b': None}}] - self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], - xp(input, "a")) - self.assertEquals([{'c': 'c_1'}], xp(input, "a/b")) - self.assertEquals(['c_1'], xp(input, "a/b/c")) - - input = [{'a': {'b': {'c': 'c_1'}}}, - {'a': {'b': {'c': 'c_2'}}}] - self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], - xp(input, "a")) - self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], - xp(input, "a/b")) - self.assertEquals(['c_1', 'c_2'], xp(input, "a/b/c")) - - self.assertEquals([], xp(input, "a/b/c/d")) - self.assertEquals([], xp(input, "c/a/b/d")) - self.assertEquals([], xp(input, "i/r/t")) - - def test_flattens_lists(self): - xp = utils.minixpath_select - - input = [{'a': [1, 2, 3]}] - self.assertEquals([1, 2, 3], xp(input, "a")) - self.assertEquals([], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': [1, 2, 3]}}] - self.assertEquals([{'b': [1, 2, 3]}], xp(input, "a")) - self.assertEquals([1, 2, 3], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] - self.assertEquals([1, 2, 3, 4, 5, 6], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] - self.assertEquals([1, 2, 3, 4, 5, 6], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = [{'a': [1, 2, {'b': 'b_1'}]}] - self.assertEquals([1, 2, {'b': 'b_1'}], xp(input, "a")) - self.assertEquals(['b_1'], xp(input, "a/b")) - - def test_bad_xpath(self): - xp = utils.minixpath_select - - self.assertRaises(exception.Error, xp, [], None) - self.assertRaises(exception.Error, xp, [], "") - 
self.assertRaises(exception.Error, xp, [], "/") - self.assertRaises(exception.Error, xp, [], "/a") - self.assertRaises(exception.Error, xp, [], "/a/") - self.assertRaises(exception.Error, xp, [], "//") - self.assertRaises(exception.Error, xp, [], "//a") - self.assertRaises(exception.Error, xp, [], "a//a") - self.assertRaises(exception.Error, xp, [], "a//a/") - self.assertRaises(exception.Error, xp, [], "a/a/") - - def test_real_failure1(self): - # Real world failure case... - # We weren't coping when the input was a Dictionary instead of a List - # This led to test_accepts_dictionaries - xp = utils.minixpath_select - - inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], - 'address': '192.168.0.3'}, - 'hostname': ''} - - private_ips = xp(inst, 'fixed_ip/address') - public_ips = xp(inst, 'fixed_ip/floating_ips/address') - self.assertEquals(['192.168.0.3'], private_ips) - self.assertEquals(['1.2.3.4'], public_ips) - - def test_accepts_dictionaries(self): - xp = utils.minixpath_select - - input = {'a': [1, 2, 3]} - self.assertEquals([1, 2, 3], xp(input, "a")) - self.assertEquals([], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = {'a': {'b': [1, 2, 3]}} - self.assertEquals([{'b': [1, 2, 3]}], xp(input, "a")) - self.assertEquals([1, 2, 3], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} - self.assertEquals([1, 2, 3, 4, 5, 6], xp(input, "a/b")) - self.assertEquals([], xp(input, "a/b/c")) - - input = {'a': [1, 2, {'b': 'b_1'}]} - self.assertEquals([1, 2, {'b': 'b_1'}], xp(input, "a")) - self.assertEquals(['b_1'], xp(input, "a/b")) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py new file mode 100644 index 000000000..34a407f1a --- /dev/null +++ b/nova/tests/test_utils.py @@ -0,0 +1,174 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# 
not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import test +from nova import utils +from nova import exception + + +class GetFromPathTestCase(test.TestCase): + def test_tolerates_nones(self): + f = utils.get_from_path + + input = [] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [None] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': None}] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': None}}] + self.assertEquals([{'b': None}], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': None}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] + self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + def test_does_select(self): + f = utils.get_from_path + + input = [{'a': 'a_1'}] + self.assertEquals(['a_1'], f(input, 
"a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': 'b_1'}}] + self.assertEquals([{'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': None}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': {'c': 'c_2'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], + f(input, "a")) + self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) + self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c")) + + self.assertEquals([], f(input, "a/b/c/d")) + self.assertEquals([], f(input, "c/a/b/d")) + self.assertEquals([], f(input, "i/r/t")) + + def test_flattens_lists(self): + f = utils.get_from_path + + input = [{'a': [1, 2, 3]}] + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}] + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, 
"a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [1, 2, {'b': 'b_1'}]}] + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + + def test_bad_xpath(self): + f = utils.get_from_path + + self.assertRaises(exception.Error, f, [], None) + self.assertRaises(exception.Error, f, [], "") + self.assertRaises(exception.Error, f, [], "/") + self.assertRaises(exception.Error, f, [], "/a") + self.assertRaises(exception.Error, f, [], "/a/") + self.assertRaises(exception.Error, f, [], "//") + self.assertRaises(exception.Error, f, [], "//a") + self.assertRaises(exception.Error, f, [], "a//a") + self.assertRaises(exception.Error, f, [], "a//a/") + self.assertRaises(exception.Error, f, [], "a/a/") + + def test_real_failure1(self): + # Real world failure case... + # We weren't coping when the input was a Dictionary instead of a List + # This led to test_accepts_dictionaries + f = utils.get_from_path + + inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], + 'address': '192.168.0.3'}, + 'hostname': ''} + + private_ips = f(inst, 'fixed_ip/address') + public_ips = f(inst, 'fixed_ip/floating_ips/address') + self.assertEquals(['192.168.0.3'], private_ips) + self.assertEquals(['1.2.3.4'], public_ips) + + def test_accepts_dictionaries(self): + f = utils.get_from_path + + input = {'a': [1, 2, 3]} + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': {'b': [1, 2, 3]}} + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [1, 2, {'b': 'b_1'}]} + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) diff --git 
a/nova/utils.py b/nova/utils.py index c2cbeb2a7..65e28c648 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -32,10 +32,10 @@ import string import struct import sys import time +import types from xml.sax import saxutils import re import netaddr -import types from eventlet import event from eventlet import greenthread @@ -503,18 +503,19 @@ def ensure_b64_encoding(val): return base64.b64encode(val) -def minixpath_select(items, minixpath): - """ Takes an xpath-like expression e.g. prop1/prop2/prop3, and for each - item in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of - the intermediate results are lists it will treat each list item - individually. A 'None' in items or any child expressions will be ignored, - this function will not throw because of None (anywhere) in items. The - returned list will contain no None values.""" +def get_from_path(items, path): + """ Returns a list of items matching the specified path. Takes an + XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, + looks up items[prop1][prop2][prop3]. Like XPath, if any of the + intermediate results are lists it will treat each list item individually. + A 'None' in items or any child expressions will be ignored, this function + will not throw because of None (anywhere) in items. 
The returned list + will contain no None values.""" - if minixpath is None: + if path is None: raise exception.Error("Invalid mini_xpath") - (first_token, sep, remainder) = minixpath.partition("/") + (first_token, sep, remainder) = path.partition("/") if first_token == "": raise exception.Error("Invalid mini_xpath") @@ -537,7 +538,6 @@ def minixpath_select(items, minixpath): child = get_method(first_token) if child is None: continue - #print "%s => %s" % (first_token, child) if isinstance(child, types.ListType): # Flatten intermediate lists for x in child: @@ -549,4 +549,4 @@ def minixpath_select(items, minixpath): # No more tokens return results else: - return minixpath_select(results, remainder) + return get_from_path(results, remainder) -- cgit From a508e2dce781b98db5a719df75a451d9a2727fca Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 14:12:43 -0800 Subject: Make sure there are two blank links after the import --- nova/api/openstack/servers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ce4a6256a..6c227d71a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -31,6 +31,7 @@ from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack + LOG = logging.getLogger('server') LOG.setLevel(logging.DEBUG) -- cgit From 8c007b56b586257d048b6db4ecfbed8f502381fd Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 14:16:31 -0800 Subject: Put back the comments I accidentally removed --- nova/api/openstack/servers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 6c227d71a..97323f66f 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -63,9 +63,11 @@ def _translate_detail_keys(inst): inst_dict['status'] = power_mapping[inst_dict['status']] inst_dict['addresses'] = dict(public=[], 
private=[]) + # grab single private fixed ip private_ips = utils.get_from_path(inst, 'fixed_ip/address') inst_dict['addresses']['private'] = private_ips + # grab all public floating ips public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address') inst_dict['addresses']['public'] = public_ips -- cgit From 24090232272e0db163060e0ca32dbf97c05120c9 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 23 Feb 2011 15:14:16 -0800 Subject: updates to nova.flags to get help working better Fixes some old bugs that were brought up on the mailing list. First step towards moving flags into the places where they belong. Also moves manager import into service's init so that we can get all the dynamically loaded flags shortly after loading. --- nova/flags.py | 49 +++++++++++++++++++++++++++++++++++++++++----- nova/service.py | 31 +++++++++++++++++------------ nova/tests/test_service.py | 7 ------- 3 files changed, 62 insertions(+), 25 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index f64a62da9..24bca0caf 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -160,9 +160,45 @@ class StrWrapper(object): raise KeyError(name) -FLAGS = FlagValues() -gflags.FLAGS = FLAGS -gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS) +# Copied from gflags with small mods to get the naming correct. +# Originally gflags checks for the first module that is not gflags that is +# in the call chain, we want to check for the first module that is not gflags +# and not this module. +def _GetCallingModule(): + """Returns the name of the module that's calling into this module. + + We generally use this function to get the name of the module calling a + DEFINE_foo... function. + """ + # Walk down the stack to find the first globals dict that's not ours. 
+ for depth in range(1, sys.getrecursionlimit()): + if not sys._getframe(depth).f_globals is globals(): + module_name = __GetModuleName(sys._getframe(depth).f_globals) + if module_name == 'gflags': + continue + if module_name is not None: + return module_name + raise AssertionError("No module was found") + + +# Copied from gflags because it is a private function +def __GetModuleName(globals_dict): + """Given a globals dict, returns the name of the module that defines it. + + Args: + globals_dict: A dictionary that should correspond to an environment + providing the values of the globals. + + Returns: + A string (the name of the module) or None (if the module could not + be identified. + """ + for name, module in sys.modules.iteritems(): + if getattr(module, '__dict__', None) is globals_dict: + if name == '__main__': + return sys.argv[0] + return name + return None def _wrapper(func): @@ -173,6 +209,11 @@ def _wrapper(func): return _wrapped +FLAGS = FlagValues() +gflags.FLAGS = FLAGS +gflags._GetCallingModule = _GetCallingModule + + DEFINE = _wrapper(gflags.DEFINE) DEFINE_string = _wrapper(gflags.DEFINE_string) DEFINE_integer = _wrapper(gflags.DEFINE_integer) @@ -185,8 +226,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist) DEFINE_multistring = _wrapper(gflags.DEFINE_multistring) DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int) DEFINE_flag = _wrapper(gflags.DEFINE_flag) - - HelpFlag = gflags.HelpFlag HelpshortFlag = gflags.HelpshortFlag HelpXMLFlag = gflags.HelpXMLFlag diff --git a/nova/service.py b/nova/service.py index cc88ac233..f47358089 100644 --- a/nova/service.py +++ b/nova/service.py @@ -45,15 +45,10 @@ FLAGS = flags.FLAGS flags.DEFINE_integer('report_interval', 10, 'seconds between nodes reporting state to datastore', lower_bound=1) - flags.DEFINE_integer('periodic_interval', 60, 'seconds between running periodic tasks', lower_bound=1) -flags.DEFINE_flag(flags.HelpFlag()) -flags.DEFINE_flag(flags.HelpshortFlag()) 
-flags.DEFINE_flag(flags.HelpXMLFlag()) - class Service(object): """Base class for workers that run on hosts.""" @@ -64,6 +59,8 @@ class Service(object): self.binary = binary self.topic = topic self.manager_class_name = manager + manager_class = utils.import_class(self.manager_class_name) + self.manager = manager_class(host=self.host, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval super(Service, self).__init__(*args, **kwargs) @@ -71,9 +68,9 @@ class Service(object): self.timers = [] def start(self): - manager_class = utils.import_class(self.manager_class_name) - self.manager = manager_class(host=self.host, *self.saved_args, - **self.saved_kwargs) + vcs_string = version.version_string_with_vcs() + logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"), + {'topic': self.topic, 'vcs_string': vcs_string}) self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() @@ -153,9 +150,6 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - vcs_string = version.version_string_with_vcs() - logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)") - % locals()) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -217,8 +211,19 @@ class Service(object): def serve(*services): - if not services: - services = [Service.create()] + try: + if not services: + services = [Service.create()] + except Exception: + logging.exception('in Service.create()') + raise + finally: + # After we've loaded up all our dynamic bits, check + # whether we should print help + flags.DEFINE_flag(flags.HelpFlag()) + flags.DEFINE_flag(flags.HelpshortFlag()) + flags.DEFINE_flag(flags.HelpXMLFlag()) + FLAGS.ParseNewFlags() name = '_'.join(x.binary for x in services) logging.debug(_("Serving %s"), name) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 
a67c8d1e8..45d9afa6c 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -50,13 +50,6 @@ class ExtendedService(service.Service): class ServiceManagerTestCase(test.TestCase): """Test cases for Services""" - def test_attribute_error_for_no_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'nova.tests.test_service.FakeManager') - self.assertRaises(AttributeError, getattr, serv, 'test_method') - def test_message_gets_to_manager(self): serv = service.Service('test', 'test', -- cgit From b7d89758af54b291492eecae74cee29461ca28b9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 15:16:56 -0800 Subject: remove processName from debug output since we aren't using multiprocessing and it doesn't exist in python 2.6.1 --- nova/log.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/log.py b/nova/log.py index 10c14d74b..7866c34f7 100644 --- a/nova/log.py +++ b/nova/log.py @@ -54,7 +54,7 @@ flags.DEFINE_string('logging_default_format_string', 'format string to use for log messages without context') flags.DEFINE_string('logging_debug_format_suffix', - 'from %(processName)s (pid=%(process)d) %(funcName)s' + 'from (pid=%(process)d) %(funcName)s' ' %(pathname)s:%(lineno)d', 'data to append to log format when level is DEBUG') -- cgit From 3115a65f9981371ea8587a288b360c3c519de865 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 23 Feb 2011 15:26:52 -0800 Subject: add help back to the scripts that don't use service.py --- bin/nova-ajax-console-proxy | 4 +++- bin/nova-api | 3 +++ bin/nova-direct-api | 4 ++++ bin/nova-manage | 3 +++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 392b328b1..1e11c6d58 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -47,9 +47,11 @@ from nova import utils from nova import wsgi FLAGS = flags.FLAGS - flags.DEFINE_integer('ajax_console_idle_timeout', 300, 'Seconds 
before idle connection destroyed') +flags.DEFINE_flag(flags.HelpFlag()) +flags.DEFINE_flag(flags.HelpshortFlag()) +flags.DEFINE_flag(flags.HelpXMLFlag()) LOG = logging.getLogger('nova.ajax_console_proxy') LOG.setLevel(logging.DEBUG) diff --git a/bin/nova-api b/bin/nova-api index d5efb4687..cf140570a 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -48,6 +48,9 @@ flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen') flags.DEFINE_string('osapi_listen', "0.0.0.0", 'IP address for OpenStack API to listen') flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen') +flags.DEFINE_flag(flags.HelpFlag()) +flags.DEFINE_flag(flags.HelpshortFlag()) +flags.DEFINE_flag(flags.HelpXMLFlag()) API_ENDPOINTS = ['ec2', 'osapi'] diff --git a/bin/nova-direct-api b/bin/nova-direct-api index 6c63bd26b..bf29d9a5e 100755 --- a/bin/nova-direct-api +++ b/bin/nova-direct-api @@ -45,6 +45,10 @@ from nova.compute import api as compute_api FLAGS = flags.FLAGS flags.DEFINE_integer('direct_port', 8001, 'Direct API port') flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host') +flags.DEFINE_flag(flags.HelpFlag()) +flags.DEFINE_flag(flags.HelpshortFlag()) +flags.DEFINE_flag(flags.HelpXMLFlag()) + if __name__ == '__main__': utils.default_flagfile() diff --git a/bin/nova-manage b/bin/nova-manage index 5189de0e1..b603c8b07 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -93,6 +93,9 @@ flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') +flags.DEFINE_flag(flags.HelpFlag()) +flags.DEFINE_flag(flags.HelpshortFlag()) +flags.DEFINE_flag(flags.HelpXMLFlag()) def param2id(object_id): -- cgit From 5e2f82b1487b8f8e43539d0c71466fbbfed23121 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 15:29:02 -0800 Subject: revert logfile redirection and make colors work by temporarily 
switching stdout --- nova/tests/fake_flags.py | 1 - run_tests.py | 16 +++++++++------- run_tests.sh | 4 ++-- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 575fefff6..2b1919407 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -41,4 +41,3 @@ FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///tests.sqlite' FLAGS.use_ipv6 = True -FLAGS.logfile = 'tests.log' diff --git a/run_tests.py b/run_tests.py index 877849ab5..2c04a0641 100644 --- a/run_tests.py +++ b/run_tests.py @@ -97,7 +97,7 @@ class _AnsiColorizer(object): try: return curses.tigetnum("colors") > 2 except curses.error: - curses.setupterm(fd=stream.fileno()) + curses.setupterm() return curses.tigetnum("colors") > 2 except: raise @@ -122,13 +122,13 @@ class _Win32Colorizer(object): See _AnsiColorizer docstring. """ def __init__(self, stream): - from win32console import GetStdHandle, STD_ERROR_HANDLE, \ + from win32console import GetStdHandle, STD_OUT_HANDLE, \ FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \ FOREGROUND_INTENSITY red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN, FOREGROUND_BLUE, FOREGROUND_INTENSITY) self.stream = stream - self.screenBuffer = GetStdHandle(STD_ERROR_HANDLE) + self.screenBuffer = GetStdHandle(STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, @@ -144,7 +144,7 @@ class _Win32Colorizer(object): try: import win32console screenBuffer = win32console.GetStdHandle( - win32console.STD_ERROR_HANDLE) + win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes @@ -185,12 +185,14 @@ class NovaTestResult(result.TextTestResult): result.TextTestResult.__init__(self, *args, **kw) self._last_case = None self.colorizer = None + # NOTE(vish): reset stdout for the terminal check + stdout = sys.stdout + sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: - # 
NOTE(vish): nose does funky stuff with stdout, so use stderr - # to setup the colorizer - if colorizer.supported(sys.stderr): + if colorizer.supported(): self.colorizer = colorizer(self.stream) break + sys.stdout = stdout def getDescription(self, test): return str(test) diff --git a/run_tests.sh b/run_tests.sh index 7aab9ecd9..d4586a57e 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -39,12 +39,12 @@ done function run_tests { # Just run the test suites in current environment - ${wrapper} $NOSETESTS + ${wrapper} $NOSETESTS 2> run_tests.log # If we get some short import error right away, print the error log directly RESULT=$? if [ "$RESULT" -ne "0" ]; then - ERRSIZE=`wc -l run_tests.err.log | awk '{print \$1}'` + ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'` if [ "$ERRSIZE" -lt "40" ]; then cat run_tests.err.log -- cgit From c27c19ea316b343f1623a7c1bf21c53cd426603b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 15:35:30 -0800 Subject: fix pep8 --- run_tests.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/run_tests.py b/run_tests.py index 2c04a0641..eef0120bc 100644 --- a/run_tests.py +++ b/run_tests.py @@ -87,7 +87,7 @@ class _AnsiColorizer(object): coloring terminal output using this method. Returns False otherwise. 
""" if not stream.isatty(): - return False # auto color only on TTYs + return False # auto color only on TTYs try: import curses except ImportError: @@ -180,6 +180,7 @@ class _NullColorizer(object): def write(self, text, color): self.stream.write(text) + class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): result.TextTestResult.__init__(self, *args, **kw) @@ -276,7 +277,7 @@ class NovaTestRunner(core.TextTestRunner): if __name__ == '__main__': logging.setup() - testdir = os.path.abspath(os.path.join("nova","tests")) + testdir = os.path.abspath(os.path.join("nova", "tests")) testdb = os.path.join(testdir, "tests.sqlite") if os.path.exists(testdb): os.unlink(testdb) -- cgit From 78eddce564cccf0d9be19b303cbc122966f5fa71 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 15:42:59 -0800 Subject: added comments about where code came from --- run_tests.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/run_tests.py b/run_tests.py index eef0120bc..8025548e5 100644 --- a/run_tests.py +++ b/run_tests.py @@ -198,6 +198,7 @@ class NovaTestResult(result.TextTestResult): def getDescription(self, test): return str(test) + # NOTE(vish): copied from unittest with edit to add color def addSuccess(self, test): unittest.TestResult.addSuccess(self, test) if self.showAll: @@ -207,6 +208,7 @@ class NovaTestResult(result.TextTestResult): self.stream.write('.') self.stream.flush() + # NOTE(vish): copied from unittest with edit to add color def addFailure(self, test, err): unittest.TestResult.addFailure(self, test, err) if self.showAll: @@ -216,6 +218,7 @@ class NovaTestResult(result.TextTestResult): self.stream.write('F') self.stream.flush() + # NOTE(vish): copied from nose with edit to add color def addError(self, test, err): """Overrides normal addError to add support for errorClasses. 
If the exception is a registered class, the -- cgit From 3aa0183bca5585c706b4b02e0a7542422015c693 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 23 Feb 2011 18:54:13 -0800 Subject: fix missed err.log --- run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index d4586a57e..7ac3ff33f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -47,7 +47,7 @@ function run_tests { ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'` if [ "$ERRSIZE" -lt "40" ]; then - cat run_tests.err.log + cat run_tests.log fi fi return $RESULT -- cgit From f86c45764d6cf62b1ded928e807e93198ed6ffd1 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 22:02:06 -0800 Subject: PEP 257 fixes --- nova/volume/driver.py | 42 +++++++++++++++++++++++------------------- nova/volume/san.py | 22 +++++++++++++++------- 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 22c2c2fc3..4263a6a8d 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -227,14 +227,16 @@ class FakeAOEDriver(AOEDriver): class ISCSIDriver(VolumeDriver): - """Executes commands relating to ISCSI volumes. We make use of model - provider properties as follows: - provider_location - if present, contains the iSCSI target information - in the same format as an ietadm discovery - i.e. ', ' - provider_auth - if present, contains a space-separated triple: - ' '. CHAP is the only - auth_method in use at the moment.""" + """Executes commands relating to ISCSI volumes. + + We make use of model provider properties as follows: + :provider_location: if present, contains the iSCSI target information + in the same format as an ietadm discovery + i.e. ':, ' + :provider_auth: if present, contains a space-separated triple: + ' '. + `CHAP` is the only auth_method in use at the moment. 
+ """ def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" @@ -321,17 +323,19 @@ class ISCSIDriver(VolumeDriver): return None def _get_iscsi_properties(self, volume): - """Gets iscsi configuration, ideally from saved information in the - volume entity, but falling back to discovery if need be. The - properties are: - target_discovered - boolean indicating whether discovery was used, - target_iqn - the IQN of the iSCSI target, - target_portal - the portal of the iSCSI target, - and auth_method, auth_username and auth_password - - the authentication details. Right now, either - auth_method is not present meaning no authentication, or - auth_method == 'CHAP' meaning use CHAP with the specified - credentials.""" + """Gets iscsi configuration + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. Discovery may be completely removed in future + The properties are: + :target_discovered: boolean indicating whether discovery was used + :target_iqn: the IQN of the iSCSI target, + :target_portal: the portal of the iSCSI target + :auth_method:, :auth_username:, :auth_password: + the authentication details. Right now, either auth_method is not + present meaning no authentication, or auth_method == `CHAP` + meaning use CHAP with the specified credentials. + """ properties = {} diff --git a/nova/volume/san.py b/nova/volume/san.py index 09192bc9f..d6598d16e 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -16,8 +16,9 @@ # under the License. """ Drivers for san-stored volumes. + The unique thing about a SAN is that we don't expect that we can run the volume - controller on the SAN hardware. We expect to access it over SSH or some API. +controller on the SAN hardware. We expect to access it over SSH or some API. 
""" import os @@ -51,7 +52,11 @@ flags.DEFINE_integer('san_ssh_port', 22, class SanISCSIDriver(ISCSIDriver): """ Base class for SAN-style storage volumes - (storage providers we access over SSH)""" + + A SAN-style storage value is 'different' because the volume controller + probably won't run on it, so we need to access is over SSH or another + remote protocol. + """ def _build_iscsi_target_name(self, volume): return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) @@ -137,6 +142,7 @@ def _get_prefixed_values(data, prefix): class SolarisISCSIDriver(SanISCSIDriver): """Executes commands relating to Solaris-hosted ISCSI volumes. + Basic setup for a Solaris iSCSI server: pkg install storage-server SUNWiscsit svcadm enable stmf @@ -330,13 +336,14 @@ class SolarisISCSIDriver(SanISCSIDriver): class HpSanISCSIDriver(SanISCSIDriver): """Executes commands relating to HP/Lefthand SAN ISCSI volumes. + We use the CLIQ interface, over SSH. Rough overview of CLIQ commands used: - CLIQ createVolume (creates the volume) - CLIQ getVolumeInfo (to discover the IQN etc) - CLIQ getClusterInfo (to discover the iSCSI target IP address) - CLIQ assignVolumeChap (exports it with CHAP security) + :createVolume: (creates the volume) + :getVolumeInfo: (to discover the IQN etc) + :getClusterInfo: (to discover the iSCSI target IP address) + :assignVolumeChap: (exports it with CHAP security) The 'trick' here is that the HP SAN enforces security by default, so normally a volume mount would need both to configure the SAN in the volume @@ -344,7 +351,8 @@ class HpSanISCSIDriver(SanISCSIDriver): not catered for at the moment in the nova architecture, so instead we share the volume using CHAP at volume creation time. Then the mount need only use those CHAP credentials, so can take place exclusively in the - compute layer""" + compute layer. 
+ """ def _cliq_run(self, verb, cliq_args): """Runs a CLIQ command over SSH, without doing any result parsing""" -- cgit From 0a649493d21a8766f416ceca2c0122c20945ca1b Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 22:47:57 -0800 Subject: Documentation fixes so that output looks better --- nova/volume/driver.py | 9 ++++++++- nova/volume/san.py | 13 +++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 4263a6a8d..e3744c790 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -230,9 +230,11 @@ class ISCSIDriver(VolumeDriver): """Executes commands relating to ISCSI volumes. We make use of model provider properties as follows: + :provider_location: if present, contains the iSCSI target information in the same format as an ietadm discovery i.e. ':, ' + :provider_auth: if present, contains a space-separated triple: ' '. `CHAP` is the only auth_method in use at the moment. @@ -328,10 +330,15 @@ class ISCSIDriver(VolumeDriver): We ideally get saved information in the volume entity, but fall back to discovery if need be. Discovery may be completely removed in future The properties are: + :target_discovered: boolean indicating whether discovery was used - :target_iqn: the IQN of the iSCSI target, + + :target_iqn: the IQN of the iSCSI target + :target_portal: the portal of the iSCSI target + :auth_method:, :auth_username:, :auth_password: + the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. diff --git a/nova/volume/san.py b/nova/volume/san.py index d6598d16e..9532c8116 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -144,16 +144,25 @@ class SolarisISCSIDriver(SanISCSIDriver): """Executes commands relating to Solaris-hosted ISCSI volumes. 
Basic setup for a Solaris iSCSI server: + pkg install storage-server SUNWiscsit + svcadm enable stmf + svcadm enable -r svc:/network/iscsi/target:default + pfexec itadm create-tpg e1000g0 ${MYIP} + pfexec itadm create-target -t e1000g0 + Then grant the user that will be logging on lots of permissions. I'm not sure exactly which though: + zfs allow justinsb create,mount,destroy rpool + usermod -P'File System Management' justinsb + usermod -P'Primary Administrator' justinsb Also make sure you can login using san_login & san_password/san_privatekey @@ -340,9 +349,13 @@ class HpSanISCSIDriver(SanISCSIDriver): We use the CLIQ interface, over SSH. Rough overview of CLIQ commands used: + :createVolume: (creates the volume) + :getVolumeInfo: (to discover the IQN etc) + :getClusterInfo: (to discover the iSCSI target IP address) + :assignVolumeChap: (exports it with CHAP security) The 'trick' here is that the HP SAN enforces security by default, so -- cgit From 701e1c15944062f7d229e59f2ede06398226b165 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 23 Feb 2011 22:53:44 -0800 Subject: Hotfix to not require metadata --- nova/api/openstack/servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 63e047b39..841bab6d0 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -177,7 +177,7 @@ class Controller(wsgi.Controller): # However, the CloudServers API is not definitive on this front, # and we want to be compatible. metadata = [] - if env['server']['metadata']: + if env['server'].get('metadata'): for k, v in env['server']['metadata'].items(): metadata.append({'key': k, 'value': v}) -- cgit From 7e463d262339acc7c611b86471b1422dea50d1ee Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Thu, 24 Feb 2011 11:43:40 +0000 Subject: xenapi plugin function now checks whether /boot/guest already exists. 
If not, it creates the directory --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 8cb439259..61b947c25 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -73,6 +73,10 @@ def _copy_kernel_vdi(dest, copy_args): logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s", dest, vdi_uuid) filename = KERNEL_DIR + '/' + vdi_uuid + #make sure KERNEL_DIR exists, otherwise create it + if not os.path.isdir(KERNEL_DIR): + logging.debug("Creating directory %s", KERNEL_DIR) + os.makedirs(KERNEL_DIR) #read data from /dev/ and write into a file on /boot/guest of = open(filename, 'wb') f = open(dest, 'rb') -- cgit From f8640adff4beff1e7d77bbd67771a6d072d42140 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 24 Feb 2011 13:58:31 +0000 Subject: Get DNS value from Flag, when working in FlatNetworking mode. Passing the flag was ineffective previously. This is an easy fix. 
I think we would need nova-manage to accept dns also from command line --- nova/network/manager.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/network/manager.py b/nova/network/manager.py index 1df193be0..12a0c5018 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -369,6 +369,7 @@ class FlatManager(NetworkManager): project_net = IPy.IP(cidr) net = {} net['bridge'] = FLAGS.flat_network_bridge + net['dns'] = FLAGS.flat_network_dns net['cidr'] = cidr net['netmask'] = str(project_net.netmask()) net['gateway'] = str(project_net[1]) -- cgit From a721d2208ed9d88884925d6f1e2c8e26d7d1ea27 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 24 Feb 2011 15:19:29 +0100 Subject: Globally exclude .pyc files from tarball contents --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index f0a9cffb3..2ceed34f3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -38,3 +38,4 @@ include nova/tests/db/nova.austin.sqlite include plugins/xenapi/README include plugins/xenapi/etc/xapi.d/plugins/objectstore include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +global-exclude *.pyc -- cgit From 8e2ebb1a963f58514d8fb6aab4a75627e72484b9 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Thu, 24 Feb 2011 16:04:13 +0000 Subject: stubbing out _is_vdi_pv for test purposes --- nova/tests/test_xenapi.py | 1 + nova/tests/xenapi/stubs.py | 6 ++++++ nova/virt/xenapi/vm_utils.py | 27 ++++++++++++++------------- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 6b8efc9d8..2cbe58aab 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -167,6 +167,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stubout_stream_disk(self.stubs) + stubs.stubout_is_vdi_pv(self.stubs) self.stubs.Set(VMOps, 'reset_network', reset_network) 
glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 624995ada..4fec2bd75 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -130,6 +130,12 @@ def stubout_stream_disk(stubs): stubs.Set(vm_utils, '_stream_disk', f) +def stubout_is_vdi_pv(stubs): + def f(_1): + return False + stubs.Set(vm_utils, '_is_vdi_pv', f) + + class FakeSessionForVMTests(fake.SessionBase): """ Stubs out a XenAPISession for VM tests """ def __init__(self, uri): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 80cc3035d..564a25057 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -400,19 +400,7 @@ class VMHelper(HelperBase): @classmethod def _lookup_image_glance(cls, session, vdi_ref): LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) - - def is_vdi_pv(dev): - LOG.debug(_("Running pygrub against %s"), dev) - output = os.popen('pygrub -qn /dev/%s' % dev) - for line in output.readlines(): - #try to find kernel string - m = re.search('(?<=kernel:)/.*(?:>)', line) - if m and m.group(0).find('xen') != -1: - LOG.debug(_("Found Xen kernel %s") % m.group(0)) - return True - LOG.debug(_("No Xen kernel found. Booting HVM.")) - return False - return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) + return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) @classmethod def lookup(cls, session, i): @@ -714,6 +702,19 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) +def _is_vdi_pv(dev): + LOG.debug(_("Running pygrub against %s"), dev) + output = os.popen('pygrub -qn /dev/%s' % dev) + for line in output.readlines(): + #try to find kernel string + m = re.search('(?<=kernel:)/.*(?:>)', line) + if m and m.group(0).find('xen') != -1: + LOG.debug(_("Found Xen kernel %s") % m.group(0)) + return True + LOG.debug(_("No Xen kernel found. 
Booting HVM.")) + return False + + def _stream_disk(dev, type, virtual_size, image_file): offset = 0 if type == ImageType.DISK: -- cgit From 377b59ec5ae13d37cc375dd9b9cf7eb8d89be196 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 24 Feb 2011 14:36:15 -0500 Subject: Fix copypasta pep8 violation. --- nova/adminclient.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/adminclient.py b/nova/adminclient.py index fe2aca351..fc3c5c5fe 100644 --- a/nova/adminclient.py +++ b/nova/adminclient.py @@ -198,7 +198,7 @@ class HostInfo(object): class Vpn(object): """ Information about a Vpn, as parsed through SAX - + **Fields Include** * instance_id -- cgit From 8635f7a306e4cfa41ad09a18602efa7793f6da95 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 24 Feb 2011 15:04:07 -0600 Subject: moved network injection and vif creation to above vm start in vmops spawn --- nova/virt/xenapi/vmops.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 76b88a8bd..3aa5a23e5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -104,6 +104,10 @@ class VMOps(object): instance, kernel, ramdisk, pv_kernel) VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) + # inject_network_info and create vifs + networks = self.inject_network_info(instance) + self.create_vifs(instance, networks) + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) instance_name = instance.name @@ -134,9 +138,7 @@ class VMOps(object): timer.f = _wait_for_boot - # call to reset network to inject network info and configure - networks = self.inject_network_info(instance) - self.create_vifs(instance, networks) + # call to reset network to configure network from xenstore self.reset_network(instance) return timer.start(interval=0.5, now=True) -- cgit From f4221346418ef103635b104fc152a2507d60a8dc Mon Sep 17 00:00:00 2001 From: Trey Morris Date: 
Thu, 24 Feb 2011 17:25:00 -0600 Subject: forgot to get vm_opaque_ref --- nova/virt/xenapi/vmops.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3aa5a23e5..8abaa14fa 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -450,6 +450,7 @@ class VMOps(object): Creates vifs for an instance """ + vm_opaque_ref = self._get_vm_opaque_ref(instance.id) logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref) if networks is None: networks = db.network_get_all_by_instance(admin_context, -- cgit From 2218cb025adca1ded3e6596acc182b88742e3a51 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 24 Feb 2011 21:59:36 -0500 Subject: Rename auth_token db methods to follow standard. --- nova/api/openstack/auth.py | 6 +++--- nova/db/api.py | 12 ++++++------ nova/db/sqlalchemy/api.py | 6 +++--- nova/tests/api/openstack/fakes.py | 6 +++--- nova/tests/api/openstack/test_auth.py | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 1dfdd5318..c844c6231 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -103,11 +103,11 @@ class AuthMiddleware(wsgi.Middleware): 2 days ago. 
""" ctxt = context.get_admin_context() - token = self.db.auth_get_token(ctxt, token_hash) + token = self.db.auth_token_get(ctxt, token_hash) if token: delta = datetime.datetime.now() - token.created_at if delta.days >= 2: - self.db.auth_destroy_token(ctxt, token) + self.db.auth_token_destroy(ctxt, token) else: return self.auth.get_user(token.user_id) return None @@ -131,6 +131,6 @@ class AuthMiddleware(wsgi.Middleware): token_dict['server_management_url'] = req.url token_dict['storage_url'] = '' token_dict['user_id'] = user.id - token = self.db.auth_create_token(ctxt, token_dict) + token = self.db.auth_token_create(ctxt, token_dict) return token, user return None, None diff --git a/nova/db/api.py b/nova/db/api.py index 0a010e727..aeb9b7ebf 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -630,19 +630,19 @@ def iscsi_target_create_safe(context, values): ############### -def auth_destroy_token(context, token): +def auth_token_destroy(context, token): """Destroy an auth token.""" - return IMPL.auth_destroy_token(context, token) + return IMPL.auth_token_destroy(context, token) -def auth_get_token(context, token_hash): +def auth_token_get(context, token_hash): """Retrieves a token given the hash representing it.""" - return IMPL.auth_get_token(context, token_hash) + return IMPL.auth_token_get(context, token_hash) -def auth_create_token(context, token): +def auth_token_create(context, token): """Creates a new token.""" - return IMPL.auth_create_token(context, token) + return IMPL.auth_token_create(context, token) ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d8751bef4..0c11b2982 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1262,13 +1262,13 @@ def iscsi_target_create_safe(context, values): @require_admin_context -def auth_destroy_token(_context, token): +def auth_token_destroy(_context, token): session = get_session() session.delete(token) @require_admin_context -def auth_get_token(_context, 
token_hash): +def auth_token_get(_context, token_hash): session = get_session() tk = session.query(models.AuthToken).\ filter_by(token_hash=token_hash).\ @@ -1279,7 +1279,7 @@ def auth_get_token(_context, token_hash): @require_admin_context -def auth_create_token(_context, token): +def auth_token_create(_context, token): tk = models.AuthToken() tk.update(token) tk.save() diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index fb282f1c9..142626de9 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -203,17 +203,17 @@ class FakeAuthDatabase(object): data = {} @staticmethod - def auth_get_token(context, token_hash): + def auth_token_get(context, token_hash): return FakeAuthDatabase.data.get(token_hash, None) @staticmethod - def auth_create_token(context, token): + def auth_token_create(context, token): fake_token = FakeToken(created_at=datetime.datetime.now(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token return fake_token @staticmethod - def auth_destroy_token(context, token): + def auth_token_destroy(context, token): if token.token_hash in FakeAuthDatabase.data: del FakeAuthDatabase.data['token_hash'] diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 13f6c3a1c..86dfb110f 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -99,10 +99,10 @@ class Test(test.TestCase): token_hash=token_hash, created_at=datetime.datetime(1990, 1, 1)) - self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token', + self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_destroy', destroy_token_mock) - self.stubs.Set(fakes.FakeAuthDatabase, 'auth_get_token', + self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_get', bad_token) req = webob.Request.blank('/v1.0/') -- cgit From 865c3d57f8b84dfcc493ecead12816874b160e35 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 24 Feb 2011 23:51:17 -0500 Subject: Pass id 
of token to be deleted to the db api, not the actual object. --- nova/api/openstack/auth.py | 2 +- nova/db/api.py | 4 ++-- nova/db/sqlalchemy/api.py | 9 ++++++--- nova/tests/api/openstack/fakes.py | 13 ++++++++++--- 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index c844c6231..dff69a7f2 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -107,7 +107,7 @@ class AuthMiddleware(wsgi.Middleware): if token: delta = datetime.datetime.now() - token.created_at if delta.days >= 2: - self.db.auth_token_destroy(ctxt, token) + self.db.auth_token_destroy(ctxt, token.id) else: return self.auth.get_user(token.user_id) return None diff --git a/nova/db/api.py b/nova/db/api.py index aeb9b7ebf..4c7eb857f 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -630,9 +630,9 @@ def iscsi_target_create_safe(context, values): ############### -def auth_token_destroy(context, token): +def auth_token_destroy(context, token_id): """Destroy an auth token.""" - return IMPL.auth_token_destroy(context, token) + return IMPL.auth_token_destroy(context, token_id) def auth_token_get(context, token_hash): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 0c11b2982..0be08c4d1 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1262,16 +1262,19 @@ def iscsi_target_create_safe(context, values): @require_admin_context -def auth_token_destroy(_context, token): +def auth_token_destroy(context, token_id): session = get_session() - session.delete(token) + with session.begin(): + token_ref = auth_token_get(context, token_id, session=session) + token_ref.delete(session=session) @require_admin_context -def auth_token_get(_context, token_hash): +def auth_token_get(context, token_hash): session = get_session() tk = session.query(models.AuthToken).\ filter_by(token_hash=token_hash).\ + filter_by(deleted=can_read_deleted(context)).\ first() if not tk: raise 
exception.NotFound(_('Token %s does not exist') % token_hash) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 142626de9..49ce8c1b5 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -188,7 +188,11 @@ def stub_out_glance(stubs, initial_fixtures=None): class FakeToken(object): + id = 0 + def __init__(self, **kwargs): + FakeToken.id += 1 + self.id = FakeToken.id for k, v in kwargs.iteritems(): setattr(self, k, v) @@ -210,12 +214,15 @@ class FakeAuthDatabase(object): def auth_token_create(context, token): fake_token = FakeToken(created_at=datetime.datetime.now(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token + FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token @staticmethod - def auth_token_destroy(context, token): - if token.token_hash in FakeAuthDatabase.data: - del FakeAuthDatabase.data['token_hash'] + def auth_token_destroy(context, token_id): + token = FakeAuthDatabase.data.get('id_%i' % token_id) + if token and token.token_hash in FakeAuthDatabase.data: + del FakeAuthDatabase.data[token.token_hash] + del FakeAuthDatabase.data['id_%i' % token_id] class FakeAuthManager(object): -- cgit