From 5ef94944514c3f81e31cc60d3d63b903859dca45 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 18 Aug 2011 22:15:13 -0700 Subject: add user_id and description. without user_id, there is no way for a tenant to tell which user created the server. description should be added for ec2 parity. --- nova/api/openstack/create_instance_helper.py | 3 ++- nova/api/openstack/views/servers.py | 2 ++ nova/tests/api/openstack/test_servers.py | 22 +++++++++++++++++++--- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 978741682..96f817d38 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -154,7 +154,8 @@ class CreateInstanceHelper(object): kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, - display_description=name, + display_description=\ + server_dict.get('description', ''), key_name=key_name, key_data=key_data, metadata=server_dict.get('metadata', {}), diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index edc328129..c5f1e6021 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -64,6 +64,8 @@ class ViewBuilder(object): inst_dict = { 'id': inst['id'], 'name': inst['display_name'], + 'user_id': inst['user_id'], + 'description': inst['display_description'], 'status': common.status_from_power_state(inst.get('state'))} ctxt = nova.context.get_admin_context() diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 437620854..7ca58b24d 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -194,7 +194,7 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": server_name, - "display_description": "", + "display_description": "fakedescription", "locked": False, "metadata": metadata, "uuid": uuid, @@ -329,10 +329,12 @@ class ServersTest(test.TestCase): "server": { "id": 1, "uuid": FAKE_UUID, + "user_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "server1", + "description": "fakedescription", "status": "BUILD", "hostId": '', "image": { @@ -491,10 +493,12 @@ class ServersTest(test.TestCase): "server": { "id": 1, "uuid": FAKE_UUID, + "user_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 100, "name": "server1", + "description": "fakedescription", "status": "ACTIVE", "hostId": '', "image": { @@ -582,10 +586,12 @@ class ServersTest(test.TestCase): "server": { "id": 1, "uuid": FAKE_UUID, + "user_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 100, "name": "server1", + "description": "fakedescription", "status": "ACTIVE", "hostId": '', "image": { @@ -1380,6 +1386,8 @@ class ServersTest(test.TestCase): 'uuid': FAKE_UUID, 'instance_type': dict(inst_type), 'image_ref': image_ref, + 'display_description': 'fakedescription', + 'user_id': 'fake', "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), } @@ -2719,6 +2727,8 @@ class TestServerInstanceCreation(test.TestCase): else: self.injected_files = None return [{'id': '1234', 'display_name': 'fakeinstance', + 'user_id': 'fake', + 'display_description': 'fakedescription', 'uuid': FAKE_UUID}] def 
set_admin_password(self, *args, **kwargs): @@ -3010,7 +3020,7 @@ class ServersViewBuilderV11Test(test.TestCase): "created_at": created_at, "updated_at": updated_at, "admin_pass": "", - "user_id": "", + "user_id": "fake", "project_id": "", "image_ref": "5", "kernel_id": "", @@ -3036,7 +3046,7 @@ class ServersViewBuilderV11Test(test.TestCase): "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": "test_server", - "display_description": "", + "display_description": "fakedescription", "locked": False, "metadata": [], #"address": , @@ -3088,10 +3098,12 @@ class ServersViewBuilderV11Test(test.TestCase): "server": { "id": 1, "uuid": self.instance['uuid'], + "user_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + "description": "fakedescription", "status": "BUILD", "hostId": '', "image": { @@ -3139,10 +3151,12 @@ class ServersViewBuilderV11Test(test.TestCase): "server": { "id": 1, "uuid": self.instance['uuid'], + "user_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 100, "name": "test_server", + "description": "fakedescription", "status": "ACTIVE", "hostId": '', "image": { @@ -3194,10 +3208,12 @@ class ServersViewBuilderV11Test(test.TestCase): "server": { "id": 1, "uuid": self.instance['uuid'], + "user_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + "description": "fakedescription", "status": "BUILD", "hostId": '', "image": { -- cgit From 34ef09beb3bf00fd9eb16b8517c520af24641e8c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 19 Aug 2011 10:10:51 -0700 Subject: add tenant_id to api. without tenant_id, admins can't tell which servers belong to which tenants when retrieving lists --- nova/api/openstack/servers.py | 5 +++++ nova/api/openstack/views/servers.py | 1 + nova/tests/api/openstack/test_servers.py | 29 ++++++++++++++++++++++++++++- 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 41e63ec3c..57ed5f45e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -107,6 +107,11 @@ class Controller(object): LOG.error(reason) raise exception.InvalidInput(reason=reason) + # translate tenant_id filter to internal project_id + if 'tenant_id' in search_opts: + search_opts['project_id'] = search_opts['tenant_id'] + del search_opts['tenant_id'] + # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. 
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index c5f1e6021..37f48b3b2 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -65,6 +65,7 @@ class ViewBuilder(object): 'id': inst['id'], 'name': inst['display_name'], 'user_id': inst['user_id'], + 'tenant_id': inst['project_id'], 'description': inst['display_description'], 'status': common.status_from_power_state(inst.get('state'))} diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 7ca58b24d..480d6a370 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -330,6 +330,7 @@ class ServersTest(test.TestCase): "id": 1, "uuid": FAKE_UUID, "user_id": "fake", + "tenant_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 0, @@ -494,6 +495,7 @@ class ServersTest(test.TestCase): "id": 1, "uuid": FAKE_UUID, "user_id": "fake", + "tenant_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 100, @@ -587,6 +589,7 @@ class ServersTest(test.TestCase): "id": 1, "uuid": FAKE_UUID, "user_id": "fake", + "tenant_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 100, @@ -1152,6 +1155,25 @@ class ServersTest(test.TestCase): self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], 100) + def test_tenant_id_filter_converts_to_project_id_for_admin(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertEqual(search_opts['project_id'], 'faketenant') + self.assertFalse(search_opts.get('tenant_id')) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + self.flags(allow_admin_api=True) + + req = webob.Request.blank('/v1.1/servers?tenant_id=faketenant') + # Use admin context + context = nova.context.RequestContext('testuser', 'testproject', + is_admin=True) + res = req.get_response(fakes.wsgi_app(fake_auth_context=context)) + res_dict = json.loads(res.body) + # Failure in fake_get_all returns non 200 status code + self.assertEqual(res.status_int, 200) + def test_get_servers_allows_flavor_v1_1(self): def fake_get_all(compute_self, context, search_opts=None): self.assertNotEqual(search_opts, None) @@ -1388,6 +1410,7 @@ class ServersTest(test.TestCase): 'image_ref': image_ref, 'display_description': 'fakedescription', 'user_id': 'fake', + 'project_id': 'fake', "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), } @@ -2728,6 +2751,7 @@ class TestServerInstanceCreation(test.TestCase): self.injected_files = None return [{'id': '1234', 'display_name': 'fakeinstance', 'user_id': 'fake', + 'project_id': 'fake', 'display_description': 'fakedescription', 'uuid': FAKE_UUID}] @@ -3021,7 +3045,7 @@ class ServersViewBuilderV11Test(test.TestCase): "updated_at": updated_at, "admin_pass": "", "user_id": "fake", - "project_id": "", + "project_id": "fake", "image_ref": "5", "kernel_id": "", "ramdisk_id": "", @@ -3099,6 +3123,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "user_id": "fake", + "tenant_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 0, @@ -3152,6 +3177,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "user_id": "fake", + "tenant_id": "fake", "updated": 
"2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 100, @@ -3209,6 +3235,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "user_id": "fake", + "tenant_id": "fake", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 0, -- cgit From f8ec1a5b9002f4a4cda5d7156c6006aac0035c33 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 16:12:33 -0500 Subject: initial committ --- nova/tests/test_compute.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 4f5d36f14..925ac733b 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -35,6 +35,7 @@ from nova import rpc from nova import test from nova import utils from nova.notifier import test_notifier +from nova.tests import fake_network_info LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS @@ -133,6 +134,9 @@ class ComputeTestCase(test.TestCase): def test_create_instance_defaults_display_name(self): """Verify that an instance cannot be created without a display_name.""" + import pretty_print as pp + pp(fake_get_instance_nw_info(self.stubs, 1, 2)) + self.assertEqual(True, False) cases = [dict(), dict(display_name=None)] for instance in cases: ref = self.compute_api.create(self.context, -- cgit From 773f64af47e38f10b20399fcfaa43cc52eb1d2e6 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 16:41:02 -0500 Subject: added fake network info --- nova/tests/fake_network_info.py | 107 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 nova/tests/fake_network_info.py diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py new file mode 100644 index 000000000..f5bc0368c --- /dev/null +++ b/nova/tests/fake_network_info.py @@ -0,0 +1,107 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Rackspace +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import db +from nova import test +from nova.network import manager as network_manager + + +HOST = "testhost" + + +class FakeModel(dict): + """Represent a model from the db""" + def __init__(self, *args, **kwargs): + self.update(kwargs) + + def __getattr__(self, name): + return self[name] + + +def fake_network(n): + return {'id': n, + 'label': 'test%d' % n, + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.%d.0/24' % n, + 'cidr_v6': '2001:db8:0:%x::/64' % n, + 'gateway_v6': '2001:db8:0:%x::1' % n, + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa%d' % n, + 'bridge_interface': 'fake_br%d' % n, + 'gateway': '192.168.%d.1' % n, + 'broadcast': '192.168.%d.255' % n, + 'dns1': '192.168.%d.3' % n, + 'dns2': '192.168.%d.4' % n, + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.%d.2' % n} + + +def fixed_ips(num_networks, num_ips): + for network in xrange(num_networks): + for ip in xrange(num_ips): + yield {'id': network * ip, + 'network_id': network, + 'address': '192.168.%d.100' % network, + 'instance_id': 0, + 'allocated': False, + # and since network_id and vif_id happen to be equivalent + 'virtual_interface_id': network, + 'floating_ips': [FakeModel(**floating_ip)]} + + +flavor = {'id': 0, + 'rxtx_cap': 3} + + +floating_ip = {'id': 0, + 'address': '10.10.10.10', + 'fixed_ip_id': 0, + 'project_id': None, + 'auto_assigned': False} + + +def vifs(n): + for x in xrange(n): + yield {'id': x, + 'address': 'DE:AD:BE:EF:00:%2x' % x, + 'uuid': '00000000-0000-0000-0000-00000000000000%2d' % x, + 'network_id': x, + 'network': FakeModel(**fake_network(x)), + 'instance_id': 0} + + +def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): + network = network_manager.FlatManager(host=HOST) + + def fixed_ips_fake(*args, **kwargs): + return fixed_ips() + + def virtual_interfaces_fake(*args, **kwargs): + return [vif for vif in vifs(n)] + + def instance_type_fake(*args, **kwargs): + return flavor + + stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake) + stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interface_fake) + stubs.Set(db, 'instance_type_get', instance_type_fake) + + nw_info = self.network.get_instance_nw_info(None, 0, 0, None) -- cgit From 608a6c3e719fffe9af6f8f7cc6d18824d0c28c36 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 16:42:52 -0500 Subject: typo --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index f5bc0368c..991599987 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -101,7 +101,7 @@ def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): return flavor stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake) - stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interface_fake) + stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake) stubs.Set(db, 'instance_type_get', instance_type_fake) nw_info = self.network.get_instance_nw_info(None, 0, 0, None) -- cgit From c4d73b638dee9dc4c1b2883affc0c00cdfc5ecb6 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 16:44:23 -0500 Subject: typo --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 991599987..61b35a9d2 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -104,4 +104,4 @@ 
def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake) stubs.Set(db, 'instance_type_get', instance_type_fake) - nw_info = self.network.get_instance_nw_info(None, 0, 0, None) + nw_info = network_manager.get_instance_nw_info(None, 0, 0, None) -- cgit From ca83ca57646ba76908e9b5e600208fe9afde78a4 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 16:47:10 -0500 Subject: typo --- nova/tests/fake_network_info.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 61b35a9d2..1fb3584dd 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -18,6 +18,7 @@ from nova import db from nova import test from nova.network import manager as network_manager +from network_ HOST = "testhost" @@ -104,4 +105,4 @@ def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake) stubs.Set(db, 'instance_type_get', instance_type_fake) - nw_info = network_manager.get_instance_nw_info(None, 0, 0, None) + nw_info = network.get_instance_nw_info(None, 0, 0, None) -- cgit From 23e163199e3b3208d7ba16049b7a93cbd59a8eaf Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 16:48:01 -0500 Subject: typo --- nova/tests/fake_network_info.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 1fb3584dd..87f6b9c56 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -18,7 +18,6 @@ from nova import db from nova import test from nova.network import manager as network_manager -from network_ HOST = "testhost" -- cgit From 7e6451b722b1e77ccab702482d5d5ad516056825 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 16:58:39 -0500 Subject: typo --- nova/tests/fake_network_info.py | 1 + nova/tests/test_compute.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 87f6b9c56..3360675d6 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -90,6 +90,7 @@ def vifs(n): def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): network = network_manager.FlatManager(host=HOST) + network.db = db def fixed_ips_fake(*args, **kwargs): return fixed_ips() diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 925ac733b..dd65d81cc 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -134,8 +134,7 @@ class ComputeTestCase(test.TestCase): def test_create_instance_defaults_display_name(self): """Verify that an instance cannot be created without a display_name.""" - import pretty_print as pp - pp(fake_get_instance_nw_info(self.stubs, 1, 2)) + print fake_network_info.fake_get_instance_nw_info(self.stubs, 1, 2) self.assertEqual(True, False) cases = [dict(), dict(display_name=None)] for instance in cases: -- cgit From 429d28df87e887ef297453f3dc186c1a99ba0a7a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 17:00:13 -0500 Subject: typo --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 3360675d6..d920ab1a2 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -93,7 +93,7 @@ def fake_get_instance_nw_info(stubs, n=1, 
ips_per_vif=2): network.db = db def fixed_ips_fake(*args, **kwargs): - return fixed_ips() + return fixed_ips(n, ips_per_vif) def virtual_interfaces_fake(*args, **kwargs): return [vif for vif in vifs(n)] -- cgit From 6ab9559b5f1f398cdaac4eca7bfcfcda859d8bc8 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 17:02:21 -0500 Subject: fixed formatting string --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index d920ab1a2..e0ff71c2c 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -81,7 +81,7 @@ floating_ip = {'id': 0, def vifs(n): for x in xrange(n): yield {'id': x, - 'address': 'DE:AD:BE:EF:00:%2x' % x, + 'address': 'DE:AD:BE:EF:00:%02x' % x, 'uuid': '00000000-0000-0000-0000-00000000000000%2d' % x, 'network_id': x, 'network': FakeModel(**fake_network(x)), -- cgit From 95851baea252d801cbb704e869f78162ea995ceb Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 17:05:29 -0500 Subject: added return --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index e0ff71c2c..53785635c 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -105,4 +105,4 @@ def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake) stubs.Set(db, 'instance_type_get', instance_type_fake) - nw_info = network.get_instance_nw_info(None, 0, 0, None) + return network.get_instance_nw_info(None, 0, 0, None) -- cgit From 44af602dbc6f02de44e2b844a8d53a87680b7a94 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 17:18:57 -0500 Subject: who cares --- nova/tests/test_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index dd65d81cc..fcb86e322 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -134,7 +134,7 @@ class ComputeTestCase(test.TestCase): def test_create_instance_defaults_display_name(self): """Verify that an instance cannot be created without a display_name.""" - print fake_network_info.fake_get_instance_nw_info(self.stubs, 1, 2) + print fake_network_info.fake_get_instance_nw_info(self.stubs, 2, 1) self.assertEqual(True, False) cases = [dict(), dict(display_name=None)] for instance in cases: -- cgit From 32e5341af311faf9838bd5d039b2153549726a71 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 17:36:52 -0500 Subject: updated a maths --- nova/tests/fake_network_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 53785635c..e73fe0442 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -57,7 +57,7 @@ def fake_network(n): def fixed_ips(num_networks, num_ips): for network in xrange(num_networks): for ip in xrange(num_ips): - yield {'id': network * ip, + yield {'id': network * num_ips + ip, 'network_id': network, 'address': '192.168.%d.100' % network, 'instance_id': 0, @@ -82,7 +82,7 @@ def vifs(n): for x in xrange(n): yield {'id': x, 'address': 'DE:AD:BE:EF:00:%02x' % x, - 'uuid': '00000000-0000-0000-0000-00000000000000%2d' % x, + 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x, 'network_id': x, 'network': FakeModel(**fake_network(x)), 'instance_id': 0} @@ 
-93,7 +93,7 @@ def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): network.db = db def fixed_ips_fake(*args, **kwargs): - return fixed_ips(n, ips_per_vif) + return list(fixed_ips(n, ips_per_vif)) def virtual_interfaces_fake(*args, **kwargs): return [vif for vif in vifs(n)] -- cgit From 154f1fbcc7098aca210514ce9f458fb755b4b50b Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 17:51:14 -0500 Subject: updated a maths --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index e73fe0442..0c33898cb 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -59,7 +59,7 @@ def fixed_ips(num_networks, num_ips): for ip in xrange(num_ips): yield {'id': network * num_ips + ip, 'network_id': network, - 'address': '192.168.%d.100' % network, + 'address': '192.168.%d.1%02d' % (network, ip), 'instance_id': 0, 'allocated': False, # and since network_id and vif_id happen to be equivalent -- cgit From 4d975772a6a488a99bec616f0118dd1ce74e9403 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 19 Aug 2011 18:04:08 -0500 Subject: finished fake network info, removed testing shims --- nova/tests/test_compute.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index fcb86e322..4f5d36f14 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -35,7 +35,6 @@ from nova import rpc from nova import test from nova import utils from nova.notifier import test_notifier -from nova.tests import fake_network_info LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS @@ -134,8 +133,6 @@ class ComputeTestCase(test.TestCase): def test_create_instance_defaults_display_name(self): """Verify that an instance cannot be created without a display_name.""" - print fake_network_info.fake_get_instance_nw_info(self.stubs, 2, 1) - self.assertEqual(True, False) cases = [dict(), dict(display_name=None)] for instance in cases: ref = self.compute_api.create(self.context, -- cgit From d2ea838bfad96bae172458ac6389a9f98111a7df Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 22 Aug 2011 17:46:47 -0500 Subject: update test_network test_get_instance_nw_info() --- nova/network/manager.py | 12 +++---- nova/tests/fake_network_info.py | 21 ++++++------ nova/tests/test_network.py | 71 +++++++++++++++++------------------------ 3 files changed, 48 insertions(+), 56 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 921c27e45..dcd5aad9a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -468,17 +468,17 @@ class NetworkManager(manager.SchedulerDependentManager): # TODO(tr3buchet) eventually "enabled" should be determined def ip_dict(ip): return { - "ip": ip, - "netmask": network["netmask"], - "enabled": "1"} + 'ip': ip, + 'netmask': network['netmask'], + 'enabled': '1'} def ip6_dict(): return { - "ip": ipv6.to_global(network['cidr_v6'], + 'ip': ipv6.to_global(network['cidr_v6'], vif['address'], network['project_id']), - "netmask": network['netmask_v6'], - "enabled": "1"} + 'netmask': network['netmask_v6'], + 'enabled': '1'} network_dict = { 'bridge': network['bridge'], 'id': network['id'], diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 0c33898cb..072585195 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -42,8 +42,8 @@ def fake_network(n): 'gateway_v6': '2001:db8:0:%x::1' % n, 
'netmask_v6': '64', 'netmask': '255.255.255.0', - 'bridge': 'fa%d' % n, - 'bridge_interface': 'fake_br%d' % n, + 'bridge': 'fake_br%d' % n, + 'bridge_interface': 'fake_eth%d' % n, 'gateway': '192.168.%d.1' % n, 'broadcast': '192.168.%d.255' % n, 'dns1': '192.168.%d.3' % n, @@ -54,26 +54,29 @@ def fake_network(n): 'vpn_public_address': '192.168.%d.2' % n} -def fixed_ips(num_networks, num_ips): +def fixed_ips(num_networks, num_ips, num_floating_ips=0): for network in xrange(num_networks): for ip in xrange(num_ips): - yield {'id': network * num_ips + ip, + id = network * num_ips + ip + f_ips = [floating_ips(id).next() for i in xrange(num_floating_ips)] + yield {'id': id, 'network_id': network, 'address': '192.168.%d.1%02d' % (network, ip), 'instance_id': 0, 'allocated': False, # and since network_id and vif_id happen to be equivalent 'virtual_interface_id': network, - 'floating_ips': [FakeModel(**floating_ip)]} + 'floating_ips': [FakeModel(**ip) for ip in f_ips]} flavor = {'id': 0, 'rxtx_cap': 3} - -floating_ip = {'id': 0, - 'address': '10.10.10.10', - 'fixed_ip_id': 0, +def floating_ips(fixed_ip_id): + for i in xrange(154): + yield {'id': 0, + 'address': '10.10.10.%d' % (i + 100), + 'fixed_ip_id': fixed_ip_id, 'project_id': None, 'auto_assigned': False} diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index e5c80b6f6..7822fbc70 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -14,15 +14,14 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import mox from nova import db from nova import exception from nova import log as logging from nova import test from nova.network import manager as network_manager - - -import mox +from nova.tests import fake_network_info LOG = logging.getLogger('nova.tests.network') @@ -128,60 +127,50 @@ class FlatNetworkTestCase(test.TestCase): self.network.db = db def test_get_instance_nw_info(self): - self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance') - self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') - self.mox.StubOutWithMock(db, 'instance_type_get') - - db.fixed_ip_get_by_instance(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(fixed_ips) - db.virtual_interface_get_by_instance(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(vifs) - db.instance_type_get(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(flavor) - self.mox.ReplayAll() - - nw_info = self.network.get_instance_nw_info(None, 0, 0, None) + fake_get_instance_nw_info = fake_network_info.fake_get_instance_nw_info - self.assertTrue(nw_info) + nw_info = fake_get_instance_nw_info(self.stubs, 0, 2) + self.assertFalse(nw_info) - for i, nw in enumerate(nw_info): - i8 = i + 8 - check = {'bridge': 'fa%s' % i, + for i, (nw, info) in enumerate(nw_info): + check = {'bridge': 'fake_br%d' % i, 'cidr': '192.168.%s.0/24' % i, - 'cidr_v6': '2001:db%s::/64' % i8, + 'cidr_v6': '2001:db8:0:%x::/64' % i, 'id': i, 'multi_host': False, - 'injected': 'DONTCARE', - 'bridge_interface': 'fake_fa%s' % i, + 'injected': False, + 'bridge_interface': 'fake_eth%d' % i, 'vlan': None} - self.assertDictMatch(nw[0], check) + self.assertDictMatch(nw, check) - check = {'broadcast': '192.168.%s.255' % i, - 'dhcp_server': '192.168.%s.1' % i, - 'dns': 'DONTCARE', - 'gateway': '192.168.%s.1' % i, - 'gateway6': '2001:db%s::1' % i8, + check = {'broadcast': '192.168.%d.255' % i, + 'dhcp_server': '192.168.%d.1' % i, + 'dns': ['192.168.%d.3' % n, '192.168.%d.4' % n] + 
'gateway': '192.168.%d.1' % i, + 'gateway6': '2001:db8:0:%x::1' % i, 'ip6s': 'DONTCARE', 'ips': 'DONTCARE', - 'label': 'test%s' % i, - 'mac': 'DE:AD:BE:EF:00:0%s' % i, - 'vif_uuid': ('00000000-0000-0000-0000-000000000000000%s' % - i), - 'rxtx_cap': 'DONTCARE', + 'label': 'test%d' % i, + 'mac': 'DE:AD:BE:EF:00:%02x' % i, + 'vif_uuid': + '00000000-0000-0000-0000-00000000000000%02d' % i, + 'rxtx_cap': 3, 'should_create_vlan': False, 'should_create_bridge': False} - self.assertDictMatch(nw[1], check) + self.assertDictMatch(info, check) check = [{'enabled': 'DONTCARE', - 'ip': '2001:db%s::dcad:beff:feef:%s' % (i8, i), + 'ip': '2001:db8::dcad:beff:feef:%s' % i, 'netmask': '64'}] - self.assertDictListMatch(nw[1]['ip6s'], check) + self.assertDictListMatch(info['ip6s'], check) - check = [{'enabled': '1', - 'ip': '192.168.%s.100' % i, - 'netmask': '255.255.255.0'}] - self.assertDictListMatch(nw[1]['ips'], check) + num_fixed_ips = len(info['ips']) + check = [{'enabled': 'DONTCARE', + 'ip': '192.168.%d.1%02d' % (i, ip_num), + 'netmask': '255.255.255.0'} + for ip_num in xrange(num_fixed_ips)] + self.assertDictListMatch(info['ips'], check) class VlanNetworkTestCase(test.TestCase): -- cgit From 158a953f98f8d4ee365cbc4936754fe7a8c89082 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 22 Aug 2011 17:52:54 -0500 Subject: syntax --- nova/tests/test_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 7822fbc70..91105ece3 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -146,7 +146,7 @@ class FlatNetworkTestCase(test.TestCase): check = {'broadcast': '192.168.%d.255' % i, 'dhcp_server': '192.168.%d.1' % i, - 'dns': ['192.168.%d.3' % n, '192.168.%d.4' % n] + 'dns': ['192.168.%d.3' % n, '192.168.%d.4' % n], 'gateway': '192.168.%d.1' % i, 'gateway6': '2001:db8:0:%x::1' % i, 'ip6s': 'DONTCARE', -- cgit From 49ef06ba21115a64c2efbb6fa81e0e6ee3f9095d Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 22 Aug 2011 16:21:29 -0700 Subject: xml deserialization, and test fixes --- nova/api/openstack/servers.py | 9 ++++- nova/tests/api/openstack/test_servers.py | 66 +++++++++++++++++++++++++++++++- 2 files changed, 72 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 57ed5f45e..7faeb7278 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -183,6 +183,10 @@ class Controller(object): self.helper._validate_server_name(name) update_dict['display_name'] = name.strip() + if 'description' in body['server']: + description = body['server']['description'] + update_dict['display_description'] = description.strip() + try: self.compute_api.update(ctxt, id, **update_dict) except exception.NotFound: @@ -836,9 +840,12 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): def _add_server_attributes(self, node, server): node.setAttribute('id', str(server['id'])) + node.setAttribute('userId', str(server['user_id'])) + node.setAttribute('tenantId', str(server['tenant_id'])) node.setAttribute('uuid', str(server['uuid'])) node.setAttribute('hostId', str(server['hostId'])) node.setAttribute('name', server['name']) + node.setAttribute('description', server['description']) node.setAttribute('created', str(server['created'])) node.setAttribute('updated', str(server['updated'])) node.setAttribute('status', server['status']) @@ -945,7 +952,7 @@ def create_resource(version='1.0'): "attributes": { "server": ["id", "imageId", "name", 
"flavorId", "hostId", "status", "progress", "adminPass", "flavorRef", - "imageRef"], + "imageRef", "userId", "tenantId", "description"], "link": ["rel", "type", "href"], }, "dict_collections": { diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 480d6a370..2f849f07a 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -145,7 +145,8 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", uuid=FAKE_UUID, image_ref="10", - flavor_id="1", interfaces=None, name=None): + flavor_id="1", interfaces=None, name=None, + description='fakedescription'): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -194,7 +195,7 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": server_name, - "display_description": "fakedescription", + "display_description": description, "locked": False, "metadata": metadata, "uuid": uuid, @@ -427,9 +428,12 @@ class ServersTest(test.TestCase): expected = minidom.parseString(""" Date: Mon, 22 Aug 2011 17:02:54 -0700 Subject: fix pep8 issue --- nova/api/openstack/create_instance_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 96f817d38..b0cdd87ea 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -154,8 +154,8 @@ class CreateInstanceHelper(object): kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, - display_description=\ - server_dict.get('description', ''), + display_description=server_dict.\ + get('description', ''), key_name=key_name, key_data=key_data, metadata=server_dict.get('metadata', {}), -- cgit From 632526f0cf7a5be3a26c3ae14683b75bfb6afbfd Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 22 Aug 2011 22:18:43 -0700 Subject: pulling all qmanager changes into a branch based on trunk, as they were previously stacked on top of melange --- bin/nova-manage | 35 ++- nova/db/api.py | 5 + nova/db/sqlalchemy/api.py | 20 +- .../versions/041_add_network_priority.py | 45 +++ nova/db/sqlalchemy/models.py | 1 + nova/network/manager.py | 37 ++- nova/network/quantum/__init__.py | 16 ++ nova/network/quantum/client.py | 306 +++++++++++++++++++++ nova/network/quantum/fake.py | 213 ++++++++++++++ nova/network/quantum/manager.py | 232 ++++++++++++++++ nova/network/quantum/melange_connection.py | 133 +++++++++ nova/network/quantum/melange_ipam_lib.py | 135 +++++++++ nova/network/quantum/nova_ipam_lib.py | 152 ++++++++++ nova/network/quantum/quantum_connection.py | 97 +++++++ nova/tests/test_quantum.py | 261 ++++++++++++++++++ 15 files changed, 1660 insertions(+), 28 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py create mode 100644 nova/network/quantum/__init__.py create mode 100644 nova/network/quantum/client.py create mode 100644 nova/network/quantum/fake.py create mode 100644 nova/network/quantum/manager.py create mode 100644 nova/network/quantum/melange_connection.py create mode 100644 nova/network/quantum/melange_ipam_lib.py create mode 100644 nova/network/quantum/nova_ipam_lib.py create mode 100644 nova/network/quantum/quantum_connection.py create mode 100644 
nova/tests/test_quantum.py diff --git a/bin/nova-manage b/bin/nova-manage index 1b29d7196..9819ef206 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -58,11 +58,11 @@ import glob import json import math import netaddr +from optparse import OptionParser import os import sys import time -from optparse import OptionParser # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... @@ -681,10 +681,15 @@ class NetworkCommands(object): help='Multi host') @args('--dns1', dest="dns1", metavar="", help='First DNS') @args('--dns2', dest="dns2", metavar="", help='Second DNS') + @args('--project_id', dest="project_id", metavar="", + help='Project id') + @args('--priority', dest="priority", metavar="", + help='Network interface priority') def create(self, label=None, fixed_range_v4=None, num_networks=None, network_size=None, multi_host=None, vlan_start=None, vpn_start=None, fixed_range_v6=None, gateway_v6=None, - bridge=None, bridge_interface=None, dns1=None, dns2=None): + bridge=None, bridge_interface=None, dns1=None, dns2=None, + project_id=None, priority=None): """Creates fixed ips for host by range""" # check for certain required inputs @@ -761,11 +766,14 @@ class NetworkCommands(object): bridge=bridge, bridge_interface=bridge_interface, dns1=dns1, - dns2=dns2) + dns2=dns2, + project_id=project_id, + priority=priority) def list(self): """List all created networks""" - _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" + _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"\ + "\t%-15s\t%-15s\t-15s\t-15s" print _fmt % (_('id'), _('IPv4'), _('IPv6'), @@ -774,7 +782,9 @@ class NetworkCommands(object): _('DNS2'), _('VlanID'), _('project'), - _("uuid")) + _("uuid"), + _('priority'), + _('bridge')) for network in db.network_get_all(context.get_admin_context()): print _fmt % (network.id, network.cidr, @@ -784,18 +794,19 @@ class NetworkCommands(object): network.dns2, network.vlan, network.project_id, - network.uuid) + network.uuid, + network.priority, + network.bridge) @args('--network', dest="fixed_range", metavar='', help='Network to delete') def delete(self, fixed_range): """Deletes a network""" - network = db.network_get_by_cidr(context.get_admin_context(), \ - fixed_range) - if network.project_id is not None: - raise ValueError(_('Network must be disassociated from project %s' - ' before delete' % network.project_id)) - db.network_delete_safe(context.get_admin_context(), network.id) + + # delete the network + net_manager = utils.import_object(FLAGS.network_manager) + net_manager.delete_network(context.get_admin_context(), fixed_range) + class VmCommands(object): diff --git a/nova/db/api.py b/nova/db/api.py index 2d854f24c..9ff3a1c74 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -419,6 +419,11 @@ def virtual_interface_get_by_address(context, address): return IMPL.virtual_interface_get_by_address(context, address) +def virtual_interface_get_by_uuid(context, vif_uuid): + """Gets a virtual interface from the table filtering on vif uuid.""" + return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) + + def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): """Gets the virtual interface fixed_ip is associated with.""" return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 04b5405f6..d96b951a1 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -688,10 +688,8 @@ def 
fixed_ip_associate(context, address, instance_id, network_id=None): def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with session.begin(): - network_or_none = or_(models.FixedIp.network_id == network_id, - models.FixedIp.network_id == None) fixed_ip_ref = session.query(models.FixedIp).\ - filter(network_or_none).\ + filter_by(network_id=network_id).\ filter_by(reserved=False).\ filter_by(deleted=False).\ filter_by(instance=None).\ @@ -928,6 +926,22 @@ def virtual_interface_get_by_address(context, address): return vif_ref +@require_context +def virtual_interface_get_by_uuid(context, vif_uuid): + """Gets a virtual interface from the table. + + :param vif_uuid: = the uuid of the interface you're looking to get + """ + session = get_session() + vif_ref = session.query(models.VirtualInterface).\ + filter_by(uuid=vif_uuid).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + @require_context def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): """Gets the virtual interface fixed_ip is associated with. diff --git a/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py new file mode 100644 index 000000000..e619b1fcd --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py @@ -0,0 +1,45 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime + +from sqlalchemy import * +from migrate import * + +from nova import log as logging +from nova import utils + +meta = MetaData() + +# Add priority column to networks table +priority = Column('priority', Integer()) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + networks = Table('networks', meta, autoload=True) + + try: + networks.create_column(priority) + except Exception: + logging.error(_("priority column not added to networks table")) + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + networks.drop_column(priority) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 19dc3302e..11b147802 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -560,6 +560,7 @@ class Network(BASE, NovaBase): dhcp_start = Column(String(255)) project_id = Column(String(255)) + priority = Column(Integer) host = Column(String(255)) # , ForeignKey('hosts.id')) uuid = Column(String(36)) diff --git a/nova/network/manager.py b/nova/network/manager.py index aa2a3700c..b778377a0 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -443,7 +443,7 @@ class NetworkManager(manager.SchedulerDependentManager): try: fixed_ips = kwargs.get('fixed_ips') or \ self.db.fixed_ip_get_by_instance(context, instance_id) - except exceptions.FixedIpNotFoundForInstance: + except exception.FixedIpNotFoundForInstance: fixed_ips = [] LOG.debug(_("network deallocation for instance |%s|"), instance_id, context=context) @@ -541,21 +541,23 @@ class NetworkManager(manager.SchedulerDependentManager): def _allocate_mac_addresses(self, context, instance_id, networks): """Generates mac addresses and creates vif rows in db for them.""" for network in networks: - vif = {'address': self.generate_mac_address(), + self.add_virtual_interface(context, instance_id, network['id']) + + def add_virtual_interface(self, context, instance_id, network_id): + vif = {'address': self.generate_mac_address(), 'instance_id': instance_id, - 'network_id': network['id'], + 'network_id': network_id, 'uuid': str(utils.gen_uuid())} - # try FLAG times to create a vif record with a unique mac_address - for i in range(FLAGS.create_unique_mac_address_attempts): - try: - self.db.virtual_interface_create(context, vif) - break - except exception.VirtualInterfaceCreateException: - vif['address'] = self.generate_mac_address() - else: - self.db.virtual_interface_delete_by_instance(context, + # try FLAG times to create a vif record with a unique mac_address + for i in range(FLAGS.create_unique_mac_address_attempts): + try: + return self.db.virtual_interface_create(context, vif) + except exception.VirtualInterfaceCreateException: + vif['address'] = self.generate_mac_address() + else: + self.db.virtual_interface_delete_by_instance(context, instance_id) - raise exception.VirtualInterfaceMacAddressException() + raise exception.VirtualInterfaceMacAddressException() def generate_mac_address(self): """Generate an Ethernet MAC address.""" @@ -784,6 +786,15 @@ class NetworkManager(manager.SchedulerDependentManager): self._create_fixed_ips(context, network['id']) return networks + def delete_network(self, context, fixed_range, require_disassociated=True): + + network = db.network_get_by_cidr(context, fixed_range) + + if require_disassociated and network.project_id is not None: + raise ValueError(_('Network must be disassociated from project %s' + ' before delete' % network.project_id)) + 
self.db.network_delete_safe(context, network.id) + @property def _bottom_reserved_ips(self): # pylint: disable=R0201 """Number of reserved ips at the bottom of the range.""" diff --git a/nova/network/quantum/__init__.py b/nova/network/quantum/__init__.py new file mode 100644 index 000000000..f7fbfb511 --- /dev/null +++ b/nova/network/quantum/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Nicira Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py new file mode 100644 index 000000000..a0c7dc6d8 --- /dev/null +++ b/nova/network/quantum/client.py @@ -0,0 +1,306 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Citrix Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Tyler Smith, Cisco Systems + +import httplib +import json +import socket +import urllib + + +# this is a simple json-only serializer to use until +# we can just grab the standard serializer +# from the quantum library +class Serializer: + + def serialize(self, data, content_type): + try: + return json.dumps(data) + except TypeError: + pass + return json.dumps(to_primitive(data)) + + def deserialize(self, data, content_type): + return json.loads(data) + + +class api_call(object): + """A Decorator to add support for format and tenant overriding""" + def __init__(self, f): + self.f = f + + def __get__(self, instance, owner): + def with_params(*args, **kwargs): + # Temporarily set format and tenant for this request + (format, tenant) = (instance.format, instance.tenant) + + if 'format' in kwargs: + instance.format = kwargs['format'] + if 'tenant' in kwargs: + instance.tenant = kwargs['tenant'] + + ret = self.f(instance, *args) + (instance.format, instance.tenant) = (format, tenant) + return ret + return with_params + + +class Client(object): + + """A base client class - derived from Glance.BaseClient""" + + action_prefix = '/v0.1/tenants/{tenant_id}' + + """Action query strings""" + networks_path = "/networks" + network_path = "/networks/%s" + ports_path = "/networks/%s/ports" + port_path = "/networks/%s/ports/%s" + attachment_path = "/networks/%s/ports/%s/attachment" + + def __init__(self, host="127.0.0.1", port=9696, use_ssl=False, tenant=None, + format="xml", testingStub=None, key_file=None, cert_file=None, + logger=None): + """ + Creates a new client to some service. 
+ + :param host: The host where service resides + :param port: The port where service resides + :param use_ssl: True to use SSL, False to use HTTP + :param tenant: The tenant ID to make requests with + :param format: The format to query the server with + :param testingStub: A class that stubs basic server methods for tests + :param key_file: The SSL key file to use if use_ssl is true + :param cert_file: The SSL cert file to use if use_ssl is true + """ + self.host = host + self.port = port + self.use_ssl = use_ssl + self.tenant = tenant + self.format = format + self.connection = None + self.testingStub = testingStub + self.key_file = key_file + self.cert_file = cert_file + self.logger = logger + + def get_connection_type(self): + """ + Returns the proper connection type + """ + if self.testingStub: + return self.testingStub + if self.use_ssl: + return httplib.HTTPSConnection + else: + return httplib.HTTPConnection + + def do_request(self, method, action, body=None, + headers=None, params=None): + """ + Connects to the server and issues a request. + Returns the result data, or raises an appropriate exception if + HTTP status code is not 2xx + + :param method: HTTP method ("GET", "POST", "PUT", etc...) + :param body: string of data to send, or None (default) + :param headers: mapping of key/value pairs to add as headers + :param params: dictionary of key/value pairs to add to append + to action + + """ + + # Ensure we have a tenant id + if not self.tenant: + raise Exception("Tenant ID not set") + + # Add format and tenant_id + action += ".%s" % self.format + action = Client.action_prefix + action + action = action.replace('{tenant_id}', self.tenant) + + if type(params) is dict: + action += '?' + urllib.urlencode(params) + + try: + connection_type = self.get_connection_type() + headers = headers or {"Content-Type": + "application/%s" % self.format} + + # Open connection and send request, handling SSL certs + certs = {'key_file': self.key_file, 'cert_file': self.cert_file} + certs = dict((x, certs[x]) for x in certs if certs[x] != None) + + if self.use_ssl and len(certs): + c = connection_type(self.host, self.port, **certs) + else: + c = connection_type(self.host, self.port) + + if self.logger: + self.logger.debug("Quantum Client Request:\n" \ + + method + " " + action + "\n") + if body: + self.logger.debug(body) + + c.request(method, action, body, headers) + res = c.getresponse() + status_code = self.get_status_code(res) + data = res.read() + + if self.logger: + self.logger.debug("Quantum Client Reply (code = %s) :\n %s" \ + % (str(status_code), data)) + + if status_code in (httplib.OK, + httplib.CREATED, + httplib.ACCEPTED, + httplib.NO_CONTENT): + return self.deserialize(data, status_code) + else: + raise Exception("Server returned error: %s" % res.read()) + + except (socket.error, IOError), e: + raise Exception("Unable to connect to " + "server. 
Got error: %s" % e) + + def get_status_code(self, response): + """ + Returns the integer status code from the response, which + can be either a Webob.Response (used in testing) or httplib.Response + """ + if hasattr(response, 'status_int'): + return response.status_int + else: + return response.status + + def serialize(self, data): + if data is None: + return None + elif type(data) is dict: + return Serializer().serialize(data, self.content_type()) + else: + raise Exception("unable to deserialize object of type = '%s'" \ + % type(data)) + + def deserialize(self, data, status_code): + if status_code == 202: + return data + return Serializer().deserialize(data, self.content_type()) + + def content_type(self, format=None): + if not format: + format = self.format + return "application/%s" % (format) + + @api_call + def list_networks(self): + """ + Fetches a list of all networks for a tenant + """ + return self.do_request("GET", self.networks_path) + + @api_call + def show_network_details(self, network): + """ + Fetches the details of a certain network + """ + return self.do_request("GET", self.network_path % (network)) + + @api_call + def create_network(self, body=None): + """ + Creates a new network + """ + body = self.serialize(body) + return self.do_request("POST", self.networks_path, body=body) + + @api_call + def update_network(self, network, body=None): + """ + Updates a network + """ + body = self.serialize(body) + return self.do_request("PUT", self.network_path % (network), body=body) + + @api_call + def delete_network(self, network): + """ + Deletes the specified network + """ + return self.do_request("DELETE", self.network_path % (network)) + + @api_call + def list_ports(self, network): + """ + Fetches a list of ports on a given network + """ + return self.do_request("GET", self.ports_path % (network)) + + @api_call + def show_port_details(self, network, port): + """ + Fetches the details of a certain port + """ + return self.do_request("GET", self.port_path % (network, port)) + + @api_call + def create_port(self, network, body=None): + """ + Creates a new port on a given network + """ + body = self.serialize(body) + return self.do_request("POST", self.ports_path % (network), body=body) + + @api_call + def delete_port(self, network, port): + """ + Deletes the specified port from a network + """ + return self.do_request("DELETE", self.port_path % (network, port)) + + @api_call + def set_port_state(self, network, port, body=None): + """ + Sets the state of the specified port + """ + body = self.serialize(body) + return self.do_request("PUT", + self.port_path % (network, port), body=body) + + @api_call + def show_port_attachment(self, network, port): + """ + Fetches the attachment-id associated with the specified port + """ + return self.do_request("GET", self.attachment_path % (network, port)) + + @api_call + def attach_resource(self, network, port, body=None): + """ + Sets the attachment-id of the specified port + """ + body = self.serialize(body) + return self.do_request("PUT", + self.attachment_path % (network, port), body=body) + + @api_call + def detach_resource(self, network, port): + """ + Removes the attachment-id of the specified port + """ + return self.do_request("DELETE", + self.attachment_path % (network, port)) diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py new file mode 100644 index 000000000..ff2b1e9d5 --- /dev/null +++ b/nova/network/quantum/fake.py @@ -0,0 +1,213 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Nicira 
Networks, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import exception +from nova import ipv6 +from nova import log as logging +from nova import utils +import math +from netaddr import IPNetwork + + +LOG = logging.getLogger("network.quantum.fake") + + +# this class can be used for unit functional/testing on nova, +# as it does not actually make remote calls to the Quantum service +class FakeQuantumClientConnection: + + def __init__(self): + self.nets = {} + + def get_networks_for_tenant(self, tenant_id): + net_ids = [] + for net_id, n in self.nets.items(): + if n['tenant-id'] == tenant_id: + net_ids.append(net_id) + return net_ids + + def create_network(self, tenant_id, network_name): + + uuid = str(utils.gen_uuid()) + self.nets[uuid] = {'net-name': network_name, + 'tenant-id': tenant_id, + 'ports': {}} + return uuid + + def delete_network(self, tenant_id, net_id): + if self.nets[net_id]['tenant-id'] == tenant_id: + del self.nets[net_id] + + def network_exists(self, tenant_id, net_id): + try: + return self.nets[net_id]['tenant-id'] == tenant_id + except: + return False + + def _confirm_not_attached(self, interface_id): + for n in self.nets.values(): + for p in n['ports'].values(): + if p['attachment-id'] == interface_id: + raise Exception("interface '%s' is already attached" %\ + interface_id) + + def create_and_attach_port(self, tenant_id, net_id, interface_id): + if not self.network_exists(tenant_id, net_id): + raise Exception("network %s does not exist for tenant %s" %\ + (net_id, tenant_id)) + + self._confirm_not_attached(interface_id) + uuid = str(utils.gen_uuid()) + self.nets[net_id]['ports'][uuid] = \ + {"port-state": "ACTIVE", + "attachment-id": interface_id} + + def detach_and_delete_port(self, tenant_id, net_id, port_id): + if not self.network_exists(tenant_id, net_id): + raise Exception("network %s does not exist for tenant %s" %\ + (net_id, tenant_id)) + del self.nets[net_id]['ports'][port_id] + + def get_port_by_attachment(self, tenant_id, attachment_id): + for net_id, n in self.nets.items(): + if n['tenant-id'] == tenant_id: + for port_id, p in n['ports'].items(): + if p['attachment-id'] == attachment_id: + return (net_id, port_id) + + return (None, None) + + +def get_ipam_lib(net_man): + return FakeQuantumIPAMLib() + + +class FakeQuantumIPAMLib(): + + def __init__(self): + self.subnets = {} + + def create_subnet(self, context, label, tenant_id, quantum_net_id, + cidr=None, gateway_v6=None, cidr_v6=None, + dns1=None, dns2=None): + if int(cidr.split("/")[1]) != 24: + raise Exception("fake ipam_lib only supports /24s") + v4_ips = [] + net_start = cidr[0:cidr.rfind(".") + 1] + subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) + for i in xrange(2, subnet_size - 1): + v4_ips.append({"ip": net_start + str(i), + "allocated": False, + "virtual_interface_id": None, + "instance_id": None}) + self.subnets[quantum_net_id] = {\ + "label": label, + "gateway": net_start + "1", + "netmask": "255.255.255.0", + "broadcast": net_start + 
"255", + "cidr": cidr, + "gateway_v6": gateway_v6, + "cidr_v6": cidr_v6, + "dns1": dns1, + "dns2": dns2, + "project_id": tenant_id, + "v4_ips": v4_ips} + + def get_network_id_by_cidr(self, context, cidr, project_id): + for net_id, s in self.subnets.items(): + if s['cidr'] == cidr or s['cidr_v6'] == cidr: + return net_id + return None + + def delete_subnets_by_net_id(self, context, net_id, project_id): + self.verify_subnet_exists(context, project_id, net_id) + del self.subnets[net_id] + + def get_project_and_global_net_ids(self, context, project_id): + net_ids = [] + for nid, s in self.subnets.items(): + if s['project_id'] == project_id or \ + s['project_id'] == None: + net_ids.append((nid, s['project_id'])) + return net_ids + + def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): + subnet = self.subnets[quantum_net_id] + for i in xrange(0, len(subnet['v4_ips'])): + ip = subnet['v4_ips'][i] + if not ip['allocated']: + subnet['v4_ips'][i] = {'ip': ip['ip'], + 'allocated': True, + 'virtual_interface_id': vif_rec['uuid'], + 'instance_id': vif_rec['instance_id']} + return + raise Exception("Unable to find available IP for net '%s'" %\ + quantum_net_id) + + def get_subnets_by_net_id(self, context, tenant_id, net_id): + self.verify_subnet_exists(context, tenant_id, net_id) + + subnet_data = self.subnets[net_id] + subnet_data_v4 = { + 'network_id': net_id, + 'cidr': subnet_data['cidr'], + 'gateway': subnet_data['gateway'], + 'broadcast': subnet_data['broadcast'], + 'netmask': subnet_data['netmask'], + 'dns1': subnet_data['dns1'], + 'dns2': subnet_data['dns2']} + subnet_data_v6 = { + 'network_id': net_id, + 'cidr': subnet_data['cidr_v6'], + 'gateway': subnet_data['gateway_v6'], + 'broadcast': None, + 'netmask': None, + 'dns1': None, + 'dns2': None} + return (subnet_data_v4, subnet_data_v6) + + def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): + self.verify_subnet_exists(context, project_id, net_id) + + subnet_data = self.subnets[net_id] + if subnet_data['cidr_v6']: + ip = ipv6.to_global(subnet_data['cidr_v6'], + "ca:fe:de:ad:be:ef", + project_id) + return [ip] + return [] + + def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): + self.verify_subnet_exists(context, project_id, net_id) + + subnet_data = self.subnets[net_id] + for ip in subnet_data['v4_ips']: + if ip['virtual_interface_id'] == vif_id: + return [ip['ip']] + return [] + + def verify_subnet_exists(self, context, tenant_id, quantum_net_id): + if quantum_net_id not in self.subnets: + raise exception.NetworkNotFound(network_id=quantum_net_id) + + def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref): + s = self.subnets[net_id] + for ip in s['v4_ips']: + if ip['virtual_interface_id'] == vif_ref['id']: + ip['allocated'] = False + ip['instance_id'] = None + ip['virtual_interface_id'] = None diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py new file mode 100644 index 000000000..f712a93c4 --- /dev/null +++ b/nova/network/quantum/manager.py @@ -0,0 +1,232 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Nicira Networks, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import manager +from nova import utils +from nova.network import manager +from nova.network.quantum import quantum_connection +from nova.network.quantum import fake + +LOG = logging.getLogger("quantum_manager") + +FLAGS = flags.FLAGS + +flags.DEFINE_string('quantum_ipam_lib', + 'nova.network.quantum.nova_ipam_lib', + "Indicates underlying IP address management library") + + +class QuantumManager(manager.FlatManager): + + def __init__(self, ipam_lib=None, *args, **kwargs): + + if FLAGS.fake_network: + self.q_conn = fake.FakeQuantumClientConnection() + else: + self.q_conn = quantum_connection.QuantumClientConnection() + + if not ipam_lib: + ipam_lib = FLAGS.quantum_ipam_lib + self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self) + + super(QuantumManager, self).__init__(*args, **kwargs) + + def create_networks(self, context, label, cidr, multi_host, num_networks, + network_size, cidr_v6, gateway_v6, bridge, + bridge_interface, dns1=None, dns2=None, **kwargs): + if num_networks != 1: + raise Exception("QuantumManager requires that only one" + " network is created per call") + q_tenant_id = kwargs["project_id"] or \ + FLAGS.quantum_default_tenant_id + quantum_net_id = bridge + if quantum_net_id: + if not q_conn.network_exists(q_tenant_id, quantum_net_id): + raise Exception("Unable to find existing quantum " \ + " network for tenant '%s' with net-id '%s'" % \ + (q_tenant_id, quantum_net_id)) + else: + # otherwise, create network from default quantum pool + quantum_net_id = self.q_conn.create_network(q_tenant_id, label) + + ipam_tenant_id = kwargs.get("project_id", None) + self.ipam.create_subnet(context, label, ipam_tenant_id, quantum_net_id, + cidr, gateway_v6, cidr_v6, dns1, dns2) + + def delete_network(self, context, fixed_range): + project_id = context.project_id + quantum_net_id = self.ipam.get_network_id_by_cidr( + context, fixed_range, project_id) + self.ipam.delete_subnets_by_net_id(context, quantum_net_id, + project_id) + try: + q_tenant_id = project_id or FLAGS.quantum_default_tenant_id + self.q_conn.delete_network(q_tenant_id, quantum_net_id) + except Exception, e: + raise Exception("Unable to delete Quantum Network with " + "fixed_range = %s (ports still in use?)." 
% fixed_range) + + def allocate_for_instance(self, context, **kwargs): + instance_id = kwargs.pop('instance_id') + instance_type_id = kwargs['instance_type_id'] + host = kwargs.pop('host') + project_id = kwargs.pop('project_id') + LOG.debug(_("network allocations for instance %s"), instance_id) + + # if using the create-server-networks extension, 'requested_networks' + # will be defined, otherwise, just grab relevant nets from IPAM + requested_networks = kwargs.get('requested_networks') + + if requested_networks: + net_proj_pairs = [(net_id, project_id) \ + for (net_id, _i) in requested_networks] + else: + net_proj_pairs = self.ipam.get_project_and_global_net_ids(context, + project_id) + + # Create a port via quantum and attach the vif + for (net_id, project_id) in net_proj_pairs: + vif_rec = manager.FlatManager.add_virtual_interface(self, + context, instance, None) + + q_tenant_id = project_id or FLAGS.quantum_default_tenant_id + self.q_conn.create_and_attach_port(q_tenant_id, net_id, + vif_rec['uuid']) + self.ipam.allocate_fixed_ip(context, project_id, net_id, vif_rec) + + return self.get_instance_nw_info(context, instance_id, + instance_type_id, host) + + def get_instance_nw_info(self, context, instance_id, + instance_type_id, host): + network_info = [] + project_id = context.project_id + + admin_context = context.elevated() + vifs = db.virtual_interface_get_by_instance(admin_context, + instance_id) + for vif in vifs: + q_tenant_id = project_id + ipam_tenant_id = project_id + net_id, port_id = self.q_conn.get_port_by_attachment(q_tenant_id, + vif['uuid']) + if not net_id: + q_tenant_id = FLAGS.quantum_default_tenant_id + ipam_tenant_id = None + net_id, port_id = self.q_conn.get_port_by_attachment( + q_tenant_id, vif['uuid']) + if not net_id: + raise Exception(_("No network for for virtual interface %s") %\ + vif['uuid']) + (v4_subnet, v6_subnet) = self.ipam.get_subnets_by_net_id(context, + ipam_tenant_id, net_id) + v4_ips = self.ipam.get_v4_ips_by_interface(context, + net_id, vif['uuid'], + project_id=ipam_tenant_id) + v6_ips = self.ipam.get_v6_ips_by_interface(context, + net_id, vif['uuid'], + project_id=ipam_tenant_id) + + quantum_net_id = v4_subnet['network_id'] or v6_subnet['network_id'] + + def ip_dict(ip, subnet): + return { + "ip": ip, + "netmask": subnet["netmask"], + "enabled": "1"} + + network_dict = { + 'cidr': v4_subnet['cidr'], + 'injected': True, + 'multi_host': False} + + info = { + 'gateway': v4_subnet['gateway'], + 'dhcp_server': v4_subnet['gateway'], + 'broadcast': v4_subnet['broadcast'], + 'mac': vif['address'], + 'vif_uuid': vif['uuid'], + 'dns': [], + 'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]} + + if v6_subnet['cidr']: + network_dict['cidr_v6'] = v6_subnet['cidr'] + info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips] + # TODO(tr3buchet): handle ip6 routes here as well + if v6_subnet['gateway']: + info['gateway6'] = v6_subnet['gateway'] + + dns_dict = {} + for s in [v4_subnet, v6_subnet]: + for k in ['dns1', 'dns2']: + if s[k]: + dns_dict[s[k]] = None + info['dns'] = [d for d in dns_dict.keys()] + + network_info.append((network_dict, info)) + return network_info + + def deallocate_for_instance(self, context, **kwargs): + instance_id = kwargs.get('instance_id') + project_id = kwargs.pop('project_id', None) + + admin_context = context.elevated() + vifs = db.virtual_interface_get_by_instance(admin_context, + instance_id) + for vif_ref in vifs: + interface_id = vif_ref['uuid'] + q_tenant_id = project_id + ipam_tenant_id = project_id + (net_id, port_id) = 
self.q_conn.get_port_by_attachment(q_tenant_id, + interface_id) + if not net_id: + q_tenant_id = FLAGS.quantum_default_tenant_id + ipam_tenant_id = None + (net_id, port_id) = self.q_conn.get_port_by_attachment( + q_tenant_id, interface_id) + if not net_id: + LOG.error("Unable to find port with attachment: %s" % \ + (interface_id)) + continue + self.q_conn.detach_and_delete_port(q_tenant_id, + net_id, port_id) + + self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id, + net_id, vif_ref) + + self.net_manager.db.virtual_interface_delete_by_instance(admin_context, + instance_id) + self._do_trigger_security_group_members_refresh_for_instance( + instance_id) + + # validates that this tenant has quantum networks with the associated + # UUIDs. This is called by the 'os-create-server-ext' API extension + # code so that we can return an API error code to the caller if they + # request an invalid network. + def validate_networks(self, context, networks): + if networks is None: + return + + project_id = context.project_id + for (net_id, _i) in networks: + self.ipam.verify_subnet_exists(context, project_id, net_id) + if not self.q_conn.network_exists(project_id, net_id): + raise exception.NetworkNotFound(network_id=net_id) diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py new file mode 100644 index 000000000..b3955138d --- /dev/null +++ b/nova/network/quantum/melange_connection.py @@ -0,0 +1,133 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import httplib +import socket +import urllib +import json +from nova import flags + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('melange_host', + '127.0.0.1', + 'HOST for connecting to melange') + +flags.DEFINE_string('melange_port', + '9898', + 'PORT for connecting to melange') + +json_content_type = {'Content-type': "application/json"} + + +class MelangeConnection(object): + + def __init__(self, host=None, port=None, use_ssl=False): + if host is None: + host = FLAGS.melange_host + if port is None: + port = int(FLAGS.melange_port) + self.host = host + self.port = port + self.use_ssl = use_ssl + + def get(self, path, params={}, headers={}): + return self.do_request("GET", path, params=params, headers=headers) + + def post(self, path, body=None, headers={}): + return self.do_request("POST", path, body=body, headers=headers) + + def delete(self, path, headers={}): + return self.do_request("DELETE", path, headers=headers) + + def _get_connection(self): + if self.use_ssl: + return httplib.HTTPSConnection(self.host, self.port) + else: + return httplib.HTTPConnection(self.host, self.port) + + def do_request(self, method, path, body=None, headers={}, params={}): + + url = path + '.json?' 
+ urllib.urlencode(params) + + try: + connection = self._get_connection() + connection.request(method, url, body, headers) + response = connection.getresponse() + response_str = response.read() + if response.status < 400: + return response_str + raise Exception("Server returned error: %s", response_str) + except (socket.error, IOError), e: + raise Exception("Unable to connect to " + "server. Got error: %s" % e) + + def allocate_ip(self, network_id, vif_id, + project_id=None, mac_address=None): + tenant_scope = "/tenants/%s" % project_id if project_id else "" + request_body = (json.dumps(dict(network=dict(mac_address=mac_address, + tenant_id=project_id))) + if mac_address else None) + url = ("/ipam%(tenant_scope)s/networks/%(network_id)s/" + "interfaces/%(vif_id)s/ip_allocations" % locals()) + response = self.post(url, body=request_body, + headers=json_content_type) + return json.loads(response)['ip_addresses'] + + def create_block(self, network_id, cidr, + project_id=None, dns1=None, dns2=None): + tenant_scope = "/tenants/%s" % project_id if project_id else "" + + url = "/ipam%(tenant_scope)s/ip_blocks" % locals() + + req_params = dict(ip_block=dict(cidr=cidr, network_id=network_id, + type='private', dns1=dns1, dns2=dns2)) + self.post(url, body=json.dumps(req_params), + headers=json_content_type) + + def delete_block(self, block_id, project_id=None): + tenant_scope = "/tenants/%s" % project_id if project_id else "" + + url = "/ipam%(tenant_scope)s/ip_blocks/%(block_id)s" % locals() + + self.delete(url, headers=json_content_type) + + def get_blocks(self, project_id=None): + tenant_scope = "/tenants/%s" % project_id if project_id else "" + + url = "/ipam%(tenant_scope)s/ip_blocks" % locals() + + response = self.get(url, headers=json_content_type) + return json.loads(response) + + def get_allocated_ips(self, network_id, vif_id, project_id=None): + tenant_scope = "/tenants/%s" % project_id if project_id else "" + + url = ("/ipam%(tenant_scope)s/networks/%(network_id)s/" + "interfaces/%(vif_id)s/ip_allocations" % locals()) + + response = self.get(url, headers=json_content_type) + return json.loads(response)['ip_addresses'] + + def deallocate_ips(self, network_id, vif_id, project_id=None): + tenant_scope = "/tenants/%s" % project_id if project_id else "" + + url = ("/ipam%(tenant_scope)s/networks/%(network_id)s/" + "interfaces/%(vif_id)s/ip_allocations" % locals()) + + self.delete(url, headers=json_content_type) diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py new file mode 100644 index 000000000..46038c349 --- /dev/null +++ b/nova/network/quantum/melange_ipam_lib.py @@ -0,0 +1,135 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Nicira Networks, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
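# ---------------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of this patch): one way the
# MelangeConnection defined in the previous file might be driven end to end.
# The network/vif UUIDs, tenant id and MAC address are hypothetical
# placeholders, and a Melange service is assumed to be listening on
# FLAGS.melange_host / FLAGS.melange_port.
def _example_melange_usage():
    from nova.network.quantum.melange_connection import MelangeConnection

    conn = MelangeConnection()
    # Carve an IPv4 block for a Quantum network inside the tenant's scope.
    conn.create_block('net-uuid', '10.1.2.0/24', project_id='tenant-1',
                      dns1='8.8.8.8')
    # Allocate an address for a vNIC, read the allocations back, release them.
    ips = conn.allocate_ip('net-uuid', 'vif-uuid', project_id='tenant-1',
                           mac_address='02:16:3e:00:00:01')
    allocated = conn.get_allocated_ips('net-uuid', 'vif-uuid',
                                       project_id='tenant-1')
    conn.deallocate_ips('net-uuid', 'vif-uuid', project_id='tenant-1')
    return ips, allocated
# ---------------------------------------------------------------------------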
+ +from netaddr import IPNetwork + +from nova import flags +from nova import log as logging +from nova.network.quantum import melange_connection + +LOG = logging.getLogger("quantum_melange_ipam") + +FLAGS = flags.FLAGS + + +def get_ipam_lib(net_man): + return QuantumMelangeIPAMLib() + + +class QuantumMelangeIPAMLib: + + def __init__(self): + self.m_conn = melange_connection.MelangeConnection() + + def create_subnet(self, context, label, project_id, + quantum_net_id, cidr=None, + gateway_v6=None, cidr_v6=None, + dns1=None, dns2=None): + tenant_id = project_id or FLAGS.quantum_default_tenant_id + if cidr: + self.m_conn.create_block(quantum_net_id, cidr, + project_id=tenant_id, + dns1=dns1, dns2=dns2) + if cidr_v6: + self.m_conn.create_block(quantum_net_id, cidr_v6, + project_id=tenant_id, + dns1=dns1, dns2=dns2) + + def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref): + tenant_id = project_id or FLAGS.quantum_default_tenant_id + self.m_conn.allocate_ip(quantum_net_id, + vif_ref['uuid'], project_id=tenant_id, + mac_address=vif_ref['address']) + + def get_network_id_by_cidr(self, context, cidr, project_id): + tenant_id = project_id or FLAGS.quantum_default_tenant_id + all_blocks = self.m_conn.get_blocks(tenant_id) + for b in all_blocks['ip_blocks']: + if b['cidr'] == cidr: + return b['network_id'] + + def delete_subnets_by_net_id(self, context, net_id, project_id): + tenant_id = project_id or FLAGS.quantum_default_tenant_id + all_blocks = self.m_conn.get_blocks(tenant_id) + for b in all_blocks['ip_blocks']: + if b['network_id'] == net_id: + self.m_conn.delete_block(b['id'], tenant_id) + + # get all networks with this project_id, as well as all networks + # where the project-id is not set (these are shared networks) + def get_project_and_global_net_ids(self, context, project_id): + id_proj_map = {} + if not project_id: + raise Exception("get_project_and_global_net_ids must be called" \ + " with a non-null project_id") + tenant_id = project_id + all_tenant_blocks = self.m_conn.get_blocks(tenant_id) + for b in all_tenant_blocks['ip_blocks']: + id_proj_map[b['network_id']] = tenant_id + tenant_id = FLAGS.quantum_default_tenant_id + all_provider_blocks = self.m_conn.get_blocks(tenant_id) + for b in all_provider_blocks['ip_blocks']: + id_proj_map[b['network_id']] = tenant_id + return id_proj_map.items() + + # FIXME: there must be a more efficient way to do this, + # talk to the melange folks + def get_subnets_by_net_id(self, context, project_id, net_id): + subnet_v4 = None + subnet_v6 = None + tenant_id = project_id or FLAGS.quantum_default_tenant_id + all_blocks = self.m_conn.get_blocks(tenant_id) + for b in all_blocks['ip_blocks']: + if b['network_id'] == net_id: + subnet = {'network_id': b['network_id'], + 'cidr': b['cidr'], + 'gateway': b['gateway'], + 'broadcast': b['broadcast'], + 'netmask': b['netmask'], + 'dns1': b['dns1'], + 'dns2': b['dns2']} + + if IPNetwork(b['cidr']).version == 6: + subnet_v6 = subnet + else: + subnet_v4 = subnet + return (subnet_v4, subnet_v6) + + def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): + return self.get_ips_by_interface(context, net_id, vif_id, + project_id, 4) + + def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): + return self.get_ips_by_interface(context, net_id, vif_id, + project_id, 6) + + def get_ips_by_interface(self, context, net_id, vif_id, project_id, + ip_version): + tenant_id = project_id or FLAGS.quantum_default_tenant_id + ip_list = self.m_conn.get_allocated_ips(net_id, vif_id, 
tenant_id) + return [ip['address'] for ip in ip_list \ + if IPNetwork(ip['address']).version == ip_version] + + def verify_subnet_exists(self, context, project_id, quantum_net_id): + tenant_id = project_id or FLAGS.quantum_default_tenant_id + v4_subnet, v6_subnet = self.get_subnets_by_net_id(context, tenant_id, + quantum_net_id) + return v4_subnet is not None + + def deallocate_ips_by_vif(self, context, project_id, net_id, vif_ref): + tenant_id = project_id or FLAGS.quantum_default_tenant_id + self.m_conn.deallocate_ips(net_id, vif_ref['uuid'], tenant_id) diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py new file mode 100644 index 000000000..0fc74fa49 --- /dev/null +++ b/nova/network/quantum/nova_ipam_lib.py @@ -0,0 +1,152 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Nicira Networks, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import math + +#from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import ipv6 +from nova import log as logging +from nova import utils +from nova.network import manager +from nova.network.quantum import melange_connection as melange + +LOG = logging.getLogger("quantum_nova_ipam_lib") + +FLAGS = flags.FLAGS + + +def get_ipam_lib(net_man): + return QuantumNovaIPAMLib(net_man) + + +class QuantumNovaIPAMLib: + + def __init__(self, net_manager): + self.net_manager = net_manager + + def create_subnet(self, context, label, tenant_id, + quantum_net_id, cidr=None, + gateway_v6=None, cidr_v6=None, + dns1=None, dns2=None): + print "creating subnet %s" % cidr + admin_context = context.elevated() + subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) + manager.FlatManager.create_networks(self.net_manager, + admin_context, label, cidr, + False, 1, subnet_size, cidr_v6, + gateway_v6, quantum_net_id, None, dns1, dns2) + + # now grab the network and update project_id + network = db.network_get_by_bridge(admin_context, quantum_net_id) + net = {"project_id": tenant_id} + db.network_update(admin_context, network['id'], net) + + def get_network_id_by_cidr(self, context, cidr, project_id): + admin_context = context.elevated() + network = db.network_get_by_cidr(admin_context, cidr) + if not network: + raise Exception("No network with fixed_range = %s" \ + % fixed_range) + return network['bridge'] + + def delete_subnets_by_net_id(self, context, net_id, project_id): + admin_context = context.elevated() + network = db.network_get_by_bridge(admin_context, net_id) + if not network: + raise Exception("No network with net_id = %s" % net_id) + manager.FlatManager.delete_network(self.net_manager, + admin_context, network['cidr'], + require_disassociated=False) + + def get_project_and_global_net_ids(self, context, project_id): + + # get all networks with this project_id, as well as all networks + # where the project-id is not set (these are shared networks) + admin_context = context.elevated() + networks = 
db.project_get_networks(admin_context, project_id, False) + networks.extend(db.project_get_networks(admin_context, None, False)) + return [(n['bridge'], n['project_id']) for n in networks] + + def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): + admin_context = context.elevated() + network = db.network_get_by_bridge(admin_context, quantum_net_id) + if network['cidr']: + address = db.fixed_ip_associate_pool(admin_context, + network['id'], + vif_rec['instance_id']) + values = {'allocated': True, + 'virtual_interface_id': vif_rec['id']} + db.fixed_ip_update(admin_context, address, values) + + def get_subnets_by_net_id(self, context, tenant_id, net_id): + n = db.network_get_by_bridge(context.elevated(), net_id) + subnet_data_v4 = { + 'network_id': n['bridge'], + 'cidr': n['cidr'], + 'gateway': n['gateway'], + 'broadcast': n['broadcast'], + 'netmask': n['netmask'], + 'dns1': n['dns1'], + 'dns2': n['dns2'] + } + subnet_data_v6 = { + 'network_id': n['bridge'], + 'cidr': n['cidr_v6'], + 'gateway': n['gateway_v6'], + 'broadcast': None, + 'netmask': None, + 'dns1': None, + 'dns2': None + } + return (subnet_data_v4, subnet_data_v6) + + def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): + vif_rec = db.virtual_interface_get_by_uuid(context, vif_id) + fixed_ips = db.fixed_ip_get_by_virtual_interface(context, + vif_rec['id']) + return [f['address'] for f in fixed_ips] + + def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): + admin_context = context.elevated() + network = db.network_get_by_bridge(admin_context, net_id) + vif_rec = db.virtual_interface_get_by_uuid(context, vif_id) + if network['cidr_v6']: + ip = ipv6.to_global(network['cidr_v6'], + vif_rec['address'], + project_id) + return [ip] + return [] + + def verify_subnet_exists(self, context, tenant_id, quantum_net_id): + admin_context = context.elevated() + network = db.network_get_by_bridge(admin_context, quantum_net_id) + + def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref): + try: + admin_context = context.elevated() + fixed_ips = db.fixed_ip_get_by_virtual_interface(admin_context, + vif_ref['id']) + for f in fixed_ips: + db.fixed_ip_update(admin_context, f['address'], + {'allocated': False, + 'virtual_interface_id': None}) + except exception.FixedIpNotFoundForInstance: + LOG.error(_('Failed to deallocate fixed IP for vif %s' % \ + vif_ref['id'])) diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py new file mode 100644 index 000000000..3aa017bcd --- /dev/null +++ b/nova/network/quantum/quantum_connection.py @@ -0,0 +1,97 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Nicira Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
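# ---------------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of this patch): fake.py,
# nova_ipam_lib.py and melange_ipam_lib.py all expose the same module-level
# get_ipam_lib(net_man) hook, which is what lets QuantumManager pick a
# backend from the --quantum_ipam_lib flag.  The helper below simply mirrors
# the lookup done in QuantumManager.__init__; the default module name comes
# from the flag definition in manager.py.
def _example_load_ipam(net_manager,
                       ipam_lib='nova.network.quantum.nova_ipam_lib'):
    from nova import utils

    # Import the configured module and ask it for an IPAM object bound to
    # this network manager, exactly as QuantumManager does.
    return utils.import_object(ipam_lib).get_ipam_lib(net_manager)
# ---------------------------------------------------------------------------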
+ +from nova import flags +from nova import log as logging +from nova import utils + +from nova.network.quantum.client import Client + +LOG = logging.getLogger("nova.network.quantum") +FLAGS = flags.FLAGS + +flags.DEFINE_string('quantum_connection_host', + '127.0.0.1', + 'HOST for connecting to quantum') + +flags.DEFINE_string('quantum_connection_port', + '9696', + 'PORT for connecting to quantum') + +flags.DEFINE_string('quantum_default_tenant_id', + "default", + 'Default tenant id when creating quantum networks') + + +class QuantumClientConnection: + + def __init__(self): + self.client = Client(FLAGS.quantum_connection_host, + FLAGS.quantum_connection_port, + format="json", + logger=LOG) + + def create_network(self, tenant_id, network_name): + data = {'network': {'net-name': network_name}} + resdict = self.client.create_network(data, tenant=tenant_id) + return resdict["networks"]["network"]["id"] + + def delete_network(self, tenant_id, net_id): + self.client.delete_network(net_id, tenant=tenant_id) + + def network_exists(self, tenant_id, net_id): + try: + self.client.show_network_details(net_id, tenant=tenant_id) + except: + # FIXME: client lib should expose more granular exceptions + # so we can confirm we're getting a 404 and not some other error + return False + return True + + def create_and_attach_port(self, tenant_id, net_id, interface_id): + LOG.debug("Connecting interface %s to net %s for %s" % \ + (interface_id, net_id, tenant_id)) + port_data = {'port': {'port-state': 'ACTIVE'}} + resdict = self.client.create_port(net_id, port_data, tenant=tenant_id) + port_id = resdict["ports"]["port"]["id"] + + attach_data = {'port': {'attachment-id': interface_id}} + self.client.attach_resource(net_id, port_id, attach_data, + tenant=tenant_id) + + def detach_and_delete_port(self, tenant_id, net_id, port_id): + LOG.debug("Deleteing port %s on net %s for %s" % \ + (port_id, net_id, tenant_id)) + + self.client.detach_resource(net_id, port_id, tenant=tenant_id) + self.client.delete_port(net_id, port_id, tenant=tenant_id) + + # FIXME: this will be inefficient until API implements querying + def get_port_by_attachment(self, tenant_id, attachment_id): + + net_list_resdict = self.client.list_networks(tenant=tenant_id) + for n in net_list_resdict["networks"]: + net_id = n['id'] + port_list_resdict = self.client.list_ports(net_id, + tenant=tenant_id) + for p in port_list_resdict["ports"]: + port_id = p["id"] + port_get_resdict = self.client.show_port_attachment(net_id, + port_id, tenant=tenant_id) + if attachment_id == port_get_resdict["attachment"]: + return (net_id, port_id) + return (None, None) diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py new file mode 100644 index 000000000..378beb6ed --- /dev/null +++ b/nova/tests/test_quantum.py @@ -0,0 +1,261 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Nicira, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
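# ---------------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of this patch): the port
# lifecycle that the QuantumClientConnection in the previous file wraps for
# QuantumManager.  Tenant and interface ids are hypothetical, and a Quantum
# service is assumed to be reachable at FLAGS.quantum_connection_host/port.
def _example_port_lifecycle():
    from nova.network.quantum.quantum_connection import QuantumClientConnection

    conn = QuantumClientConnection()
    net_id = conn.create_network('tenant-1', 'example-net')
    # Attach a vNIC, then look the port back up by its attachment id.
    conn.create_and_attach_port('tenant-1', net_id, 'vif-uuid-1')
    found_net_id, port_id = conn.get_port_by_attachment('tenant-1',
                                                        'vif-uuid-1')
    # Tear down in the reverse order.
    conn.detach_and_delete_port('tenant-1', found_net_id, port_id)
    conn.delete_network('tenant-1', net_id)
# ---------------------------------------------------------------------------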
+ +from nova import context +from nova import db +from nova import exception +from nova import log as logging +from nova import test +from nova.network.quantum import manager as quantum_manager + +LOG = logging.getLogger('nova.tests.quantum_network') + +networks = [{'label': 'project1-net1', + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.0.0/24', + 'cidr_v6': '2001:1db8::/64', + 'gateway_v6': '2001:1db8::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': None, + 'bridge_interface': None, + 'gateway': '192.168.0.1', + 'broadcast': '192.168.0.255', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', + 'vlan': None, + 'host': None, + 'vpn_public_address': None, + 'project_id': 'fake_project1'}, + {'label': 'project2-net1', + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.1.0/24', + 'cidr_v6': '2001:1db9::/64', + 'gateway_v6': '2001:1db9::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': None, + 'bridge_interface': None, + 'gateway': '192.168.1.1', + 'broadcast': '192.168.1.255', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', + 'vlan': None, + 'host': None, + 'project_id': 'fake_project2', + 'vpn_public_address': '192.168.1.2'}, + {'label': "public", + 'injected': False, + 'multi_host': False, + 'cidr': '10.0.0.0/24', + 'cidr_v6': '2001:1dba::/64', + 'gateway_v6': '2001:1dba::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': None, + 'bridge_interface': None, + 'gateway': '10.0.0.1', + 'broadcast': '10.0.0.255', + 'dns1': '10.0.0.1', + 'dns2': '10.0.0.2', + 'vlan': None, + 'host': None, + 'vpn_public_address': None, + 'project_id': None}, + {'label': "project2-net2", + 'injected': False, + 'multi_host': False, + 'cidr': '9.0.0.0/24', + 'cidr_v6': '2001:1dbb::/64', + 'gateway_v6': '2001:1dbb::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': None, + 'bridge_interface': None, + 'gateway': '9.0.0.1', + 'broadcast': '9.0.0.255', + 'dns1': '9.0.0.1', + 'dns2': '9.0.0.2', + 'vlan': None, + 'host': None, + 'vpn_public_address': None, + 'project_id': "fake_project2"}] + + +# this is a base class to be used by all other Quantum Test classes +class QuantumTestCaseBase(object): + + def test_create_and_delete_nets(self): + self._create_nets() + self._delete_nets() + + def _create_nets(self): + for n in networks: + ctx = context.RequestContext('user1', n['project_id']) + self.net_man.create_networks(ctx, + label=n['label'], cidr=n['cidr'], + multi_host=n['multi_host'], + num_networks=1, network_size=256, cidr_v6=n['cidr_v6'], + gateway_v6=n['gateway_v6'], bridge=None, + bridge_interface=None, dns1=n['dns1'], + dns2=n['dns2'], project_id=n['project_id']) + + def _delete_nets(self): + for n in networks: + ctx = context.RequestContext('user1', n['project_id']) + self.net_man.delete_network(ctx, n['cidr']) + + def test_allocate_and_deallocate_instance_static(self): + self._create_nets() + + project_id = "fake_project1" + ctx = context.RequestContext('user1', project_id) + + instance_ref = db.api.instance_create(ctx, {}) + nw_info = self.net_man.allocate_for_instance(ctx, + instance_id=instance_ref['id'], host="", + instance_type_id=instance_ref['instance_type_id'], + project_id=project_id) + + self.assertEquals(len(nw_info), 2) + + # we don't know which order the NICs will be in until we + # introduce the notion of priority + # v4 cidr + self.assertTrue(nw_info[0][0]['cidr'].startswith("10.") or \ + nw_info[1][0]['cidr'].startswith("10.")) + self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or \ + 
nw_info[1][0]['cidr'].startswith("192.")) + + # v4 address + self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10.") or \ + nw_info[1][1]['ips'][0]['ip'].startswith("10.")) + self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or \ + nw_info[1][1]['ips'][0]['ip'].startswith("192.")) + + # v6 cidr + self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:") or \ + nw_info[1][0]['cidr_v6'].startswith("2001:1dba:")) + self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db8:") or \ + nw_info[1][0]['cidr_v6'].startswith("2001:1db8:")) + + # v6 address + self.assertTrue(\ + nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:") or \ + nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dba:")) + self.assertTrue(\ + nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db8:") or \ + nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:")) + + self.net_man.deallocate_for_instance(ctx, + instance_id=instance_ref['id'], + project_id=project_id) + + self._delete_nets() + + def test_allocate_and_deallocate_instance_dynamic(self): + self._create_nets() + project_id = "fake_project2" + ctx = context.RequestContext('user1', project_id) + + net_ids = self.net_man.q_conn.get_networks_for_tenant(project_id) + requested_networks = [(net_id, None) for net_id in net_ids] + + self.net_man.validate_networks(ctx, requested_networks) + + instance_ref = db.api.instance_create(ctx, {}) + nw_info = self.net_man.allocate_for_instance(ctx, + instance_id=instance_ref['id'], host="", + instance_type_id=instance_ref['instance_type_id'], + project_id=project_id, + requested_networks=requested_networks) + + self.assertEquals(len(nw_info), 2) + + # we don't know which order the NICs will be in until we + # introduce the notion of priority + # v4 cidr + self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or \ + nw_info[1][0]['cidr'].startswith("9.")) + self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or \ + nw_info[1][0]['cidr'].startswith("192.")) + + # v4 address + self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or \ + nw_info[1][1]['ips'][0]['ip'].startswith("9.")) + self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or \ + nw_info[1][1]['ips'][0]['ip'].startswith("192.")) + + # v6 cidr + self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or \ + nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:")) + self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or \ + nw_info[1][0]['cidr_v6'].startswith("2001:1db9:")) + + # v6 address + self.assertTrue(\ + nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or \ + nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:")) + self.assertTrue(\ + nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or \ + nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:")) + + self.net_man.deallocate_for_instance(ctx, + instance_id=instance_ref['id'], + project_id=project_id) + + self._delete_nets() + + def test_validate_bad_network(self): + ctx = context.RequestContext('user1', 'fake_project1') + self.assertRaises(exception.NetworkNotFound, + self.net_man.validate_networks, ctx, [("", None)]) + + +class QuantumFakeIPAMTestCase(QuantumTestCaseBase, test.TestCase): + + def setUp(self): + super(QuantumFakeIPAMTestCase, self).setUp() + self.net_man = quantum_manager.QuantumManager( \ + ipam_lib="nova.network.quantum.fake") + + +class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase): + + def setUp(self): + super(QuantumNovaIPAMTestCase, self).setUp() + self.net_man = 
quantum_manager.QuantumManager( \ + ipam_lib="nova.network.quantum.nova_ipam_lib") + + # tests seem to create some networks by default, which + # don't want. So we delete them. + + ctx = context.RequestContext('user1', 'fake_project1').elevated() + for n in db.network_get_all(ctx): + db.network_delete_safe(ctx, n['id']) + +# Cannot run this unit tests auotmatically for now, as it requires +# melange to be running locally. +# +#class QuantumMelangeIPAMTestCase(QuantumTestCaseBase, test.TestCase): +# +# def setUp(self): +# super(QuantumMelangeIPAMTestCase, self).setUp() +# self.net_man = quantum_manager.QuantumManager( \ +# ipam_lib="nova.network.quantum.melange_ipam_lib") -- cgit From 01a219f7bb5fb5101bdbaa49d7e46f55d51dcfd2 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Tue, 23 Aug 2011 05:21:47 +0000 Subject: Launchpad automatic translations update. --- po/it.po | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/po/it.po b/po/it.po index e166297f1..462254c46 100644 --- a/po/it.po +++ b/po/it.po @@ -14,7 +14,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-22 04:48+0000\n" +"X-Launchpad-Export-Date: 2011-08-23 05:21+0000\n" "X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 -- cgit From c96a9ae1b84ee370ff5d8282a8e0531a97c5a396 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Tue, 23 Aug 2011 21:18:47 -0700 Subject: Address code review feedback from Rick and Matt --- bin/nova-manage | 11 ++-- nova/network/manager.py | 2 +- nova/network/quantum/client.py | 63 +++++++++++---------- nova/network/quantum/fake.py | 19 +++---- nova/network/quantum/manager.py | 35 ++++++------ nova/network/quantum/melange_connection.py | 6 +- nova/network/quantum/melange_ipam_lib.py | 90 +++++++++++++++--------------- nova/network/quantum/nova_ipam_lib.py | 39 ++++++------- nova/network/quantum/quantum_connection.py | 10 ++-- nova/tests/test_quantum.py | 18 +++--- 10 files changed, 146 insertions(+), 147 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 051079ef3..3a17818b2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -804,11 +804,11 @@ class NetworkCommands(object): def quantum_list(self): """List all created networks with Quantum-relevant fields""" _fmt = "%-32s\t%-10s\t%-10s\t%s , %s" - print _fmt % ( _('uuid'), - _('project'), - _('priority'), - _('cidr_v4'), - _('cidr_v6')) + print _fmt % (_('uuid'), + _('project'), + _('priority'), + _('cidr_v4'), + _('cidr_v6')) for network in db.network_get_all(context.get_admin_context()): print _fmt % (network.uuid, network.project_id, @@ -825,7 +825,6 @@ class NetworkCommands(object): net_manager = utils.import_object(FLAGS.network_manager) net_manager.delete_network(context.get_admin_context(), fixed_range) - @args('--network', dest="fixed_range", metavar='', help='Network to modify') @args('--project', dest="project", metavar='', diff --git a/nova/network/manager.py b/nova/network/manager.py index b625e7823..426ff2f33 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -547,7 +547,7 @@ class NetworkManager(manager.SchedulerDependentManager): 'network_id': network_id, 'uuid': str(utils.gen_uuid())} # try FLAG times to create a vif record with a unique mac_address - for i in range(FLAGS.create_unique_mac_address_attempts): + for i in xrange(FLAGS.create_unique_mac_address_attempts): try: return 
self.db.virtual_interface_create(context, vif) except exception.VirtualInterfaceCreateException: diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index 613369c7d..1927015c2 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -22,11 +22,12 @@ import socket import urllib -# this is a simple json-only serializer to use until -# we can just grab the standard serializer -# from the quantum library -class Serializer: - +class JSONSerializer(object): +""" +This is a simple json-only serializer to use until we can just grab +the standard serializer from the quantum library. +TODO(danwent): replace serializer with quantum implementation +""" def serialize(self, data, content_type): try: return json.dumps(data) @@ -40,12 +41,12 @@ class Serializer: class api_call(object): """A Decorator to add support for format and tenant overriding""" - def __init__(self, f): - self.f = f + def __init__(self, func): + self.func = func def __get__(self, instance, owner): def with_params(*args, **kwargs): - # Temporarily set format and tenant for this request + """Temporarily set format and tenant for this request""" (format, tenant) = (instance.format, instance.tenant) if 'format' in kwargs: @@ -53,14 +54,16 @@ class api_call(object): if 'tenant' in kwargs: instance.tenant = kwargs['tenant'] - ret = self.f(instance, *args) - (instance.format, instance.tenant) = (format, tenant) + ret = None + try: + ret = self.func(instance, *args) + finally: + (instance.format, instance.tenant) = (format, tenant) return ret return with_params class Client(object): - """A base client class - derived from Glance.BaseClient""" action_prefix = '/v1.0/tenants/{tenant_id}' @@ -73,8 +76,8 @@ class Client(object): attachment_path = "/networks/%s/ports/%s/attachment" def __init__(self, host="127.0.0.1", port=9696, use_ssl=False, tenant=None, - format="xml", testingStub=None, key_file=None, cert_file=None, - logger=None): + format="xml", testing_stub=None, key_file=None, cert_file=None, + logger=None): """ Creates a new client to some service. 
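# ---------------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of this patch): what the
# reworked api_call decorator above buys callers -- a per-request override of
# the client's format/tenant that the new try/finally restores even when the
# wrapped call raises.  Host and tenant values are hypothetical, and the
# calls assume a reachable Quantum service.
def _example_api_call_override():
    from nova.network.quantum.client import Client

    client = Client(host='127.0.0.1', port=9696,
                    tenant='tenant-1', format='json')
    client.list_networks()                   # uses tenant-1 / json
    client.list_networks(tenant='tenant-2')  # one-off override, then restored
    assert client.tenant == 'tenant-1'
# ---------------------------------------------------------------------------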
@@ -83,7 +86,7 @@ class Client(object): :param use_ssl: True to use SSL, False to use HTTP :param tenant: The tenant ID to make requests with :param format: The format to query the server with - :param testingStub: A class that stubs basic server methods for tests + :param testing_stub: A class that stubs basic server methods for tests :param key_file: The SSL key file to use if use_ssl is true :param cert_file: The SSL cert file to use if use_ssl is true """ @@ -93,7 +96,7 @@ class Client(object): self.tenant = tenant self.format = format self.connection = None - self.testingStub = testingStub + self.testing_stub = testing_stub self.key_file = key_file self.cert_file = cert_file self.logger = logger @@ -102,9 +105,9 @@ class Client(object): """ Returns the proper connection type """ - if self.testingStub: - return self.testingStub - if self.use_ssl: + if self.testing_stub: + return self.testing_stub + elif self.use_ssl: return httplib.HTTPSConnection else: return httplib.HTTPConnection @@ -126,7 +129,7 @@ class Client(object): # Ensure we have a tenant id if not self.tenant: - raise Exception("Tenant ID not set") + raise Exception(_("Tenant ID not set")) # Add format and tenant_id action += ".%s" % self.format @@ -151,8 +154,8 @@ class Client(object): c = connection_type(self.host, self.port) if self.logger: - self.logger.debug("Quantum Client Request:\n" \ - + method + " " + action + "\n") + self.logger.debug(_("Quantum Client Request:\n%s %s\n" % + (method, action))) if body: self.logger.debug(body) @@ -169,14 +172,14 @@ class Client(object): httplib.CREATED, httplib.ACCEPTED, httplib.NO_CONTENT): - if data is not None and len(data): + if not data: return self.deserialize(data, status_code) else: - raise Exception("Server returned error: %s" % res.read()) + raise Exception(_("Server returned error: %s" % res.read())) except (socket.error, IOError), e: - raise Exception("Unable to connect to " - "server. Got error: %s" % e) + raise Exception(_("Unable to connect to " + "server. Got error: %s" % e)) def get_status_code(self, response): """ @@ -189,18 +192,18 @@ class Client(object): return response.status def serialize(self, data): - if data is None: + if not data: return None elif type(data) is dict: - return Serializer().serialize(data, self.content_type()) + return JSONSerializer().serialize(data, self.content_type()) else: - raise Exception("unable to deserialize object of type = '%s'" \ - % type(data)) + raise Exception(_("unable to deserialize object of type = '%s'" % + type(data))) def deserialize(self, data, status_code): if status_code == 202: return data - return Serializer().deserialize(data, self.content_type()) + return JSONSerializer().deserialize(data, self.content_type()) def content_type(self, format=None): if not format: diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py index 00cdd8e08..f668edfed 100644 --- a/nova/network/quantum/fake.py +++ b/nova/network/quantum/fake.py @@ -15,9 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import math -from netaddr import IPNetwork - from nova import exception from nova import ipv6 from nova import log as logging @@ -29,7 +26,7 @@ LOG = logging.getLogger("network.quantum.fake") # this class can be used for unit functional/testing on nova, # as it does not actually make remote calls to the Quantum service -class FakeQuantumClientConnection: +class FakeQuantumClientConnection(object): def __init__(self): self.nets = {} @@ -56,20 +53,20 @@ class FakeQuantumClientConnection: def network_exists(self, tenant_id, net_id): try: return self.nets[net_id]['tenant-id'] == tenant_id - except: + except KeyError: return False def _confirm_not_attached(self, interface_id): for n in self.nets.values(): for p in n['ports'].values(): if p['attachment-id'] == interface_id: - raise Exception("interface '%s' is already attached" %\ - interface_id) + raise Exception(_("interface '%s' is already attached" % + interface_id)) def create_and_attach_port(self, tenant_id, net_id, interface_id): if not self.network_exists(tenant_id, net_id): - raise Exception("network %s does not exist for tenant %s" %\ - (net_id, tenant_id)) + raise Exception(_("network %s does not exist for tenant %s" % + (net_id, tenant_id))) self._confirm_not_attached(interface_id) uuid = str(utils.gen_uuid()) @@ -79,8 +76,8 @@ class FakeQuantumClientConnection: def detach_and_delete_port(self, tenant_id, net_id, port_id): if not self.network_exists(tenant_id, net_id): - raise Exception("network %s does not exist for tenant %s" %\ - (net_id, tenant_id)) + raise Exception(_("network %s does not exist for tenant %s" %\ + (net_id, tenant_id))) del self.nets[net_id]['ports'][port_id] def get_port_by_attachment(self, tenant_id, attachment_id): diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index a002a3d7b..975598324 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -84,16 +84,15 @@ class QuantumManager(manager.FlatManager): In both cases, we initialize a subnet using the IPAM lib. """ if num_networks != 1: - raise Exception("QuantumManager requires that only one" - " network is created per call") - q_tenant_id = kwargs["project_id"] or \ - FLAGS.quantum_default_tenant_id + raise Exception(_("QuantumManager requires that only one" + " network is created per call")) + q_tenant_id = kwargs.get("project_id", FLAGS.quantum_default_tenant_id) quantum_net_id = uuid if quantum_net_id: if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): - raise Exception("Unable to find existing quantum " \ - " network for tenant '%s' with net-id '%s'" % \ - (q_tenant_id, quantum_net_id)) + raise Exception(_("Unable to find existing quantum " \ + " network for tenant '%s' with net-id '%s'" % \ + (q_tenant_id, quantum_net_id))) else: # otherwise, create network from default quantum pool quantum_net_id = self.q_conn.create_network(q_tenant_id, label) @@ -156,18 +155,18 @@ class QuantumManager(manager.FlatManager): # Create a port via quantum and attach the vif for (quantum_net_id, project_id) in net_proj_pairs: - # FIXME: (danwent). We'd like to have the manager be completely - # decoupled from the nova networks table. - # However, other parts of nova sometimes go behind - # our back and access network data directly from the DB. So + # FIXME(danwent): We'd like to have the manager be + # completely decoupled from the nova networks table. + # However, other parts of nova sometimes go behind our + # back and access network data directly from the DB. 
So # for now, the quantum manager knows that there is a nova - # networks DB table and accesses it here. - # updating the virtual_interfaces table to use UUIDs would - # be one solution, but this would require significant work + # networks DB table and accesses it here. updating the + # virtual_interfaces table to use UUIDs would be one + # solution, but this would require significant work # elsewhere. admin_context = context.elevated() network_ref = db.network_get_by_uuid(admin_context, - quantum_net_id) + quantum_net_id) vif_rec = manager.FlatManager.add_virtual_interface(self, context, instance_id, network_ref['id']) @@ -177,7 +176,7 @@ class QuantumManager(manager.FlatManager): self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id, vif_rec['uuid']) self.ipam.allocate_fixed_ip(context, project_id, quantum_net_id, - vif_rec) + vif_rec) return self.get_instance_nw_info(context, instance_id, instance_type_id, host) @@ -214,8 +213,8 @@ class QuantumManager(manager.FlatManager): net_id, port_id = self.q_conn.get_port_by_attachment( q_tenant_id, vif['uuid']) if not net_id: - raise Exception(_("No network for for virtual interface %s") %\ - vif['uuid']) + raise Exception(_("No network for for virtual interface %s") % + vif['uuid']) (v4_subnet, v6_subnet) = self.ipam.get_subnets_by_net_id(context, ipam_tenant_id, net_id) v4_ips = self.ipam.get_v4_ips_by_interface(context, diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py index 2d884fa60..1ee0c29a2 100644 --- a/nova/network/quantum/melange_connection.py +++ b/nova/network/quantum/melange_connection.py @@ -73,10 +73,10 @@ class MelangeConnection(object): response_str = response.read() if response.status < 400: return response_str - raise Exception("Server returned error: %s", response_str) + raise Exception(_("Server returned error: %s", response_str)) except (socket.error, IOError), e: - raise Exception("Unable to connect to " - "server. Got error: %s" % e) + raise Exception(_("Unable to connect to " + "server. Got error: %s" % e)) def allocate_ip(self, network_id, vif_id, project_id=None, mac_address=None): diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index 7b7baf281..24a7c5404 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -31,7 +31,7 @@ def get_ipam_lib(net_man): return QuantumMelangeIPAMLib() -class QuantumMelangeIPAMLib: +class QuantumMelangeIPAMLib(object): """ Implements Quantum IP Address Management (IPAM) interface using the Melange service, which is access using the Melange web services API. @@ -42,9 +42,9 @@ class QuantumMelangeIPAMLib: self.m_conn = melange_connection.MelangeConnection() def create_subnet(self, context, label, project_id, - quantum_net_id, priority, cidr=None, - gateway_v6=None, cidr_v6=None, - dns1=None, dns2=None): + quantum_net_id, priority, cidr=None, + gateway_v6=None, cidr_v6=None, + dns1=None, dns2=None): """ Contact Melange and create a subnet for any non-NULL IPv4 or IPv6 subnets. 
@@ -56,25 +56,25 @@ class QuantumMelangeIPAMLib: tenant_id = project_id or FLAGS.quantum_default_tenant_id if cidr: self.m_conn.create_block(quantum_net_id, cidr, - project_id=tenant_id, - dns1=dns1, dns2=dns2) + project_id=tenant_id, + dns1=dns1, dns2=dns2) if cidr_v6: self.m_conn.create_block(quantum_net_id, cidr_v6, project_id=tenant_id, dns1=dns1, dns2=dns2) net = {"uuid": quantum_net_id, - "project_id": project_id, - "priority": priority, - "label": label} + "project_id": project_id, + "priority": priority, + "label": label} network = self.db.network_create_safe(context, net) def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref): """ Pass call to allocate fixed IP on to Melange""" tenant_id = project_id or FLAGS.quantum_default_tenant_id self.m_conn.allocate_ip(quantum_net_id, - vif_ref['uuid'], project_id=tenant_id, - mac_address=vif_ref['address']) + vif_ref['uuid'], project_id=tenant_id, + mac_address=vif_ref['address']) def get_network_id_by_cidr(self, context, cidr, project_id): """ Find the Quantum UUID associated with a IPv4 CIDR @@ -85,7 +85,7 @@ class QuantumMelangeIPAMLib: for b in all_blocks['ip_blocks']: if b['cidr'] == cidr: return b['network_id'] - raise Exception("No network found for cidr %s" % cidr) + raise Exception(_("No network found for cidr %s" % cidr)) def delete_subnets_by_net_id(self, context, net_id, project_id): """ Find Melange block associated with the Quantum UUID, @@ -107,38 +107,38 @@ class QuantumMelangeIPAMLib: that are "global" (i.e., have no project set). Returns list sorted by 'priority'. """ - admin_context = context.elevated() - id_proj_map = {} if project_id is None: - raise Exception("get_project_and_global_net_ids must be called" \ - " with a non-null project_id") - tenant_id = project_id - all_tenant_blocks = self.m_conn.get_blocks(tenant_id) - for b in all_tenant_blocks['ip_blocks']: - id_proj_map[b['network_id']] = tenant_id - tenant_id = FLAGS.quantum_default_tenant_id - all_provider_blocks = self.m_conn.get_blocks(tenant_id) - for b in all_provider_blocks['ip_blocks']: - id_proj_map[b['network_id']] = tenant_id - - id_priority_map = {} - for net_id, project_id in id_project_map.item(): - network = db.network_get_by_uuid(admin_context, net_id) - if network is None: - del id_proj_map[net_id] - else: - id_priority_map[net_id] = network['priority'] - return sorted(id_priority_map.items(), - key=lambda x: id_priority_map[x[0]]) + raise Exception(_("get_project_and_global_net_ids must be called" + " with a non-null project_id")) + + admin_context = context.elevated() + + # Decorate with priority + priority_nets = [] + for tenant_id in (project_id, FLAGS.quantum_default_tenant_id): + blocks = self.m_conn.get_blocks(tenant_id) + for ip_block in blocks['ip_blocks']: + network_id = ip_block['network_id'] + network = db.network_get_by_uuid(admin_context, network_id) + if network: + priority = network['priority'] + priority_nets.append((priority, network_id, tenant_id)) + + # Sort by priority + priority_nets.sort() + + # Undecorate + return [(network_id, tenant_id) + for priority, network_id, tenant_id in priority_nets] def get_subnets_by_net_id(self, context, project_id, net_id): """ Returns information about the IPv4 and IPv6 subnets associated with a Quantum Network UUID. """ - # FIXME: (danwent) Melange actually returns the subnet info - # when we query for a particular interface. 
we may want to - # reworks the ipam_manager python API to let us take advantage of + # FIXME(danwent): Melange actually returns the subnet info + # when we query for a particular interface. We may want to + # rework the ipam_manager python API to let us take advantage of # this, as right now we have to get all blocks and cycle through # them. subnet_v4 = None @@ -148,12 +148,12 @@ class QuantumMelangeIPAMLib: for b in all_blocks['ip_blocks']: if b['network_id'] == net_id: subnet = {'network_id': b['network_id'], - 'cidr': b['cidr'], - 'gateway': b['gateway'], - 'broadcast': b['broadcast'], - 'netmask': b['netmask'], - 'dns1': b['dns1'], - 'dns2': b['dns2']} + 'cidr': b['cidr'], + 'gateway': b['gateway'], + 'broadcast': b['broadcast'], + 'netmask': b['netmask'], + 'dns1': b['dns1'], + 'dns2': b['dns2']} if IPNetwork(b['cidr']).version == 6: subnet_v6 = subnet @@ -182,8 +182,8 @@ class QuantumMelangeIPAMLib: """ tenant_id = project_id or FLAGS.quantum_default_tenant_id ip_list = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id) - return [ip['address'] for ip in ip_list \ - if IPNetwork(ip['address']).version == ip_version] + return [ip['address'] for ip in ip_list + if IPNetwork(ip['address']).version == ip_version] def verify_subnet_exists(self, context, project_id, quantum_net_id): """ Confirms that a subnet exists that is associated with the diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index ce7d3efcb..71e723cb9 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -36,7 +36,7 @@ def get_ipam_lib(net_man): return QuantumNovaIPAMLib(net_man) -class QuantumNovaIPAMLib: +class QuantumNovaIPAMLib(object): """ Implements Quantum IP Address Management (IPAM) interface using the local Nova database. This implementation is inline with how IPAM is used by other NetworkManagers. @@ -50,9 +50,9 @@ class QuantumNovaIPAMLib: self.net_manager = net_manager def create_subnet(self, context, label, tenant_id, - quantum_net_id, priority, cidr=None, - gateway_v6=None, cidr_v6=None, - dns1=None, dns2=None): + quantum_net_id, priority, cidr=None, + gateway_v6=None, cidr_v6=None, + dns1=None, dns2=None): """ Re-use the basic FlatManager create_networks method to initialize the networks and fixed_ips tables in Nova DB. @@ -60,6 +60,7 @@ class QuantumNovaIPAMLib: are needed by Quantum but not the FlatManager. 
""" admin_context = context.elevated() + # FIXME(danwent): Use the netaddr library here subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) networks = manager.FlatManager.create_networks(self.net_manager, admin_context, label, cidr, @@ -67,12 +68,12 @@ class QuantumNovaIPAMLib: gateway_v6, quantum_net_id, None, dns1, dns2) if len(networks) != 1: - raise Exception("Error creating network entry") + raise Exception(_("Error creating network entry")) network = networks[0] net = {"project_id": tenant_id, - "priority": priority, - "uuid": quantum_net_id} + "priority": priority, + "uuid": quantum_net_id} db.network_update(admin_context, network['id'], net) def get_network_id_by_cidr(self, context, cidr, project_id): @@ -80,7 +81,7 @@ class QuantumNovaIPAMLib: admin_context = context.elevated() network = db.network_get_by_cidr(admin_context, cidr) if not network: - raise Exception("No network with fixed_range = %s" % fixed_range) + raise Exception(_("No network with fixed_range = %s" % fixed_range)) return network['uuid'] def delete_subnets_by_net_id(self, context, net_id, project_id): @@ -90,10 +91,10 @@ class QuantumNovaIPAMLib: admin_context = context.elevated() network = db.network_get_by_uuid(admin_context, net_id) if not network: - raise Exception("No network with net_id = %s" % net_id) + raise Exception(_("No network with net_id = %s" % net_id)) manager.FlatManager.delete_network(self.net_manager, - admin_context, network['cidr'], - require_disassociated=False) + admin_context, network['cidr'], + require_disassociated=False) def get_project_and_global_net_ids(self, context, project_id): """ Fetches all networks associated with this project, or @@ -118,8 +119,8 @@ class QuantumNovaIPAMLib: network = db.network_get_by_uuid(admin_context, quantum_net_id) if network['cidr']: address = db.fixed_ip_associate_pool(admin_context, - network['id'], - vif_rec['instance_id']) + network['id'], + vif_rec['instance_id']) values = {'allocated': True, 'virtual_interface_id': vif_rec['id']} db.fixed_ip_update(admin_context, address, values) @@ -186,10 +187,10 @@ class QuantumNovaIPAMLib: admin_context = context.elevated() fixed_ips = db.fixed_ip_get_by_virtual_interface(admin_context, vif_ref['id']) - for f in fixed_ips: - db.fixed_ip_update(admin_context, f['address'], - {'allocated': False, - 'virtual_interface_id': None}) + for fixed_ip in fixed_ips: + db.fixed_ip_update(admin_context, fixed_ip['address'], + {'allocated': False, + 'virtual_interface_id': None}) except exception.FixedIpNotFoundForInstance: - LOG.error(_('No fixed IPs to deallocate for vif %s' % \ - vif_ref['id'])) + LOG.error(_('No fixed IPs to deallocate for vif %s' % + vif_ref['id'])) diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index bd3592c2c..e2218c68d 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -37,7 +37,7 @@ flags.DEFINE_string('quantum_default_tenant_id', 'Default tenant id when creating quantum networks') -class QuantumClientConnection: +class QuantumClientConnection(object): """ Abstracts connection to Quantum service into higher level operations performed by the QuantumManager. 
@@ -71,7 +71,7 @@ class QuantumClientConnection: try: self.client.show_network_details(net_id, tenant=tenant_id) except: - # FIXME: (danwent) client lib should expose granular exceptions + # FIXME(danwent): client lib should expose granular exceptions # so we can confirm we're getting a 404 and not some other error return False return True @@ -81,8 +81,8 @@ class QuantumClientConnection: status to ACTIVE to enable traffic, and attaches the vNIC with the specified interface-id. """ - LOG.debug("Connecting interface %s to net %s for %s" % \ - (interface_id, net_id, tenant_id)) + LOG.debug(_("Connecting interface %s to net %s for %s" % + (interface_id, net_id, tenant_id))) port_data = {'port': {'state': 'ACTIVE'}} resdict = self.client.create_port(net_id, port_data, tenant=tenant_id) port_id = resdict["port"]["id"] @@ -103,7 +103,7 @@ class QuantumClientConnection: """ Given a tenant, search for the Quantum network and port UUID that has the specified interface-id attachment. """ - # FIXME: (danwent) this will be inefficient until the Quantum + # FIXME(danwent): this will be inefficient until the Quantum # API implements querying a port by the interface-id net_list_resdict = self.client.list_networks(tenant=tenant_id) for n in net_list_resdict["networks"]: diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 3efdba910..2cc83adf1 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -155,9 +155,9 @@ class QuantumTestCaseBase(object): self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:")) # v6 address - self.assertTrue(\ + self.assertTrue( nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:")) - self.assertTrue(\ + self.assertTrue( nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:")) self.net_man.deallocate_for_instance(ctx, @@ -233,24 +233,24 @@ class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase): self.net_man = quantum_manager.QuantumManager( \ ipam_lib="nova.network.quantum.nova_ipam_lib") - # tests seem to create some networks by default, which - # don't want. So we delete them. + # Tests seem to create some networks by default, which + # we don't want. So we delete them. ctx = context.RequestContext('user1', 'fake_project1').elevated() for n in db.network_get_all(ctx): db.network_delete_safe(ctx, n['id']) - # I've found that other unit tests have a nasty habit of - # of creating fixed IPs and not cleaning up, which can - # confuse these tests, so we clean them all. + # NOTE(danwent): I've found that other unit tests have a nasty + # habit of of creating fixed IPs and not cleaning up, which + # can confuse these tests, so we clean them all. session = get_session() result = session.query(models.FixedIp).all() with session.begin(): for fip_ref in result: session.delete(fip_ref) -# Cannot run this unit tests auotmatically for now, as it requires -# melange to be running locally. +# FIXME(danwent): Cannot run this unit tests automatically for now, as +# it requires melange to be running locally. # #class QuantumMelangeIPAMTestCase(QuantumTestCaseBase, test.TestCase): # -- cgit From f95d187e7b9f6f1c3d244668cc4aec49339ed169 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Wed, 24 Aug 2011 04:47:59 +0000 Subject: Launchpad automatic translations update. 
--- po/cs.po | 26 +--- po/de.po | 44 +----- po/es.po | 484 +----------------------------------------------------------- po/ja.po | 479 +---------------------------------------------------------- po/pt_BR.po | 156 +------------------- po/ru.po | 126 +--------------- po/tl.po | 13 +- po/uk.po | 42 +----- 8 files changed, 31 insertions(+), 1339 deletions(-) diff --git a/po/cs.po b/po/cs.po index 07bdf1928..561c71a4b 100644 --- a/po/cs.po +++ b/po/cs.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-02-07 12:45+0000\n" -"Last-Translator: David Pravec \n" +"PO-Revision-Date: 2011-08-23 11:22+0000\n" +"Last-Translator: Thierry Carrez \n" "Language-Team: Czech \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:43+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2789,21 +2789,3 @@ msgstr "" #, python-format msgid "Removing user %(user)s from project %(project)s" msgstr "" - -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "AMQP server na %s:%d není dosažitelný. Zkusím znovu za %d sekund." - -#, python-format -#~ msgid "" -#~ "%s\n" -#~ "Command: %s\n" -#~ "Exit code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" -#~ msgstr "" -#~ "%s\n" -#~ "Příkaz: %s\n" -#~ "Vrácená hodnota: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" diff --git a/po/de.po b/po/de.po index 1f652c373..6d8c1372c 100644 --- a/po/de.po +++ b/po/de.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-06-06 07:58+0000\n" -"Last-Translator: Christian Berendt \n" +"PO-Revision-Date: 2011-08-23 11:23+0000\n" +"Last-Translator: Thierry Carrez \n" "Language-Team: German \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2798,42 +2798,6 @@ msgstr "" msgid "Removing user %(user)s from project %(project)s" msgstr "" -#, python-format -#~ msgid "" -#~ "%s\n" -#~ "Command: %s\n" -#~ "Exit code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" -#~ msgstr "" -#~ "%s\n" -#~ "Kommando: %s\n" -#~ "Exit Code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" - -#, python-format -#~ msgid "(%s) publish (key: %s) %s" -#~ msgstr "(%s) öffentlich (Schlüssel: %s) %s" - -#, python-format -#~ msgid "Getting from %s: %s" -#~ msgstr "Beziehe von %s: %s" - -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "" -#~ "Der AMQP server %s:%d ist nicht erreichbar. Erneuter Versuch in %d Sekunden." - -#, python-format -#~ msgid "volume %s: creating lv of size %sG" -#~ msgstr "Volume %s: erstelle LV mit %sG" - -#, python-format -#~ msgid "Data store %s is unreachable. Trying again in %d seconds." -#~ msgstr "" -#~ "Datastore %s ist nicht erreichbar. 
Versuche es erneut in %d Sekunden." - #~ msgid "Full set of FLAGS:" #~ msgstr "Alle vorhandenen FLAGS:" diff --git a/po/es.po b/po/es.po index 7371eae8c..d50269378 100644 --- a/po/es.po +++ b/po/es.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-08-01 03:23+0000\n" -"Last-Translator: Juan Alfredo Salas Santillana \n" +"PO-Revision-Date: 2011-08-23 11:22+0000\n" +"Last-Translator: Thierry Carrez \n" "Language-Team: Spanish \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2838,221 +2838,16 @@ msgstr "Agregando usuario %(user)s al proyecto %(project)s" msgid "Removing user %(user)s from project %(project)s" msgstr "Eliminando el usuario %(user)s del proyecto %(project)s" -#, python-format -#~ msgid "" -#~ "%s\n" -#~ "Command: %s\n" -#~ "Exit code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" -#~ msgstr "" -#~ "%s\n" -#~ "Comando: %s\n" -#~ "Código de salida: %s\n" -#~ "Stdout: %s\n" -#~ "Stderr: %r" - -#, python-format -#~ msgid "(%s) publish (key: %s) %s" -#~ msgstr "(%s) públicar (clave: %s) %s" - -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "" -#~ "El servidor AMQP en %s:%d no se puede alcanzar. Se reintentará en %d " -#~ "segundos." - -#, python-format -#~ msgid "Binding %s to %s with key %s" -#~ msgstr "Asociando %s a %s con clave %s" - -#, python-format -#~ msgid "Getting from %s: %s" -#~ msgstr "Obteniendo desde %s: %s" - -#, python-format -#~ msgid "Starting %s node" -#~ msgstr "Inciando nodo %s" - -#, python-format -#~ msgid "Data store %s is unreachable. Trying again in %d seconds." -#~ msgstr "" -#~ "El almacen de datos %s es inalcanzable. Reintentandolo en %d segundos." - #, python-format #~ msgid "Serving %s" #~ msgstr "Sirviendo %s" -#, python-format -#~ msgid "Couldn't get IP, using 127.0.0.1 %s" -#~ msgstr "No puedo obtener IP, usando 127.0.0.1 %s" - -#, python-format -#~ msgid "" -#~ "Access key %s has had %d failed authentications and will be locked out for " -#~ "%d minutes." -#~ msgstr "" -#~ "La clave de acceso %s ha tenido %d fallos de autenticación y se bloqueará " -#~ "por %d minutos." 
- -#, python-format -#~ msgid "arg: %s\t\tval: %s" -#~ msgstr "arg: %s \t \t val: %s" - -#, python-format -#~ msgid "Authenticated Request For %s:%s)" -#~ msgstr "Solicitud de autenticación para %s:%s" - -#, python-format -#~ msgid "Adding role %s to user %s for project %s" -#~ msgstr "Añadiendo rol %s al usuario %s para el proyecto %s" - -#, python-format -#~ msgid "Removing role %s from user %s for project %s" -#~ msgstr "Eliminando rol %s del usuario %s para el proyecto %s" - -#, python-format -#~ msgid "Unauthorized request for controller=%s and action=%s" -#~ msgstr "Solicitud no autorizada para controller=%s y action=%s" - -#, python-format -#~ msgid "Getting x509 for user: %s on project: %s" -#~ msgstr "Obteniendo x509 para el usuario: %s en el proyecto %s" - -#, python-format -#~ msgid "Create project %s managed by %s" -#~ msgstr "Creación del proyecto %s gestionada por %s" - -#, python-format -#~ msgid "Removing user %s from project %s" -#~ msgstr "Eliminando usuario %s del proyecto %s" - -#, python-format -#~ msgid "Adding user %s to project %s" -#~ msgstr "Añadiendo usuario %s al proyecto %s" - -#, python-format -#~ msgid "Unsupported API request: controller = %s,action = %s" -#~ msgstr "Solicitud de API no soportada: controller=%s,action=%s" - -#, python-format -#~ msgid "Associate address %s to instance %s" -#~ msgstr "Asociar dirección %s a la instancia %s" - -#, python-format -#~ msgid "Attach volume %s to instacne %s at %s" -#~ msgstr "Asociar volumen %s a la instancia %s en %s" - -#, python-format -#~ msgid "Registered image %s with id %s" -#~ msgstr "Registrada imagen %s con id %s" - -#, python-format -#~ msgid "User %s is already a member of the group %s" -#~ msgstr "El usuario %s ya es miembro de el grupo %s" - -#, python-format -#~ msgid "User %s is not a member of project %s" -#~ msgstr "El usuario %s no es miembro del proyecto %s" - -#, python-format -#~ msgid "failed authorization: no project named %s (user=%s)" -#~ msgstr "" -#~ "fallo de autorización: no existe proyecto con el nombre %s (usuario=%s)" - -#, python-format -#~ msgid "Failed authorization: user %s not admin and not member of project %s" -#~ msgstr "" -#~ "Fallo de autorización: el usuario %s no es administrador y no es miembro del " -#~ "proyecto %s" - -#, python-format -#~ msgid "Created user %s (admin: %r)" -#~ msgstr "Creado usuario %s (administrador: %r)" - -#, python-format -#~ msgid "Created project %s with manager %s" -#~ msgstr "Proyecto %s creado con administrador %s" - -#, python-format -#~ msgid "Removing role %s from user %s on project %s" -#~ msgstr "Eliminando rol %s al usuario %s en el proyecto %s" - -#, python-format -#~ msgid "Adding role %s to user %s in project %s" -#~ msgstr "Añadiendo rol %s al usuario %s en el proyecto %s" - -#, python-format -#~ msgid "Remove user %s from project %s" -#~ msgstr "Eliminar usuario %s del proyecto %s" - -#, python-format -#~ msgid "Admin status set to %r for user %s" -#~ msgstr "El estado del administrador se ha fijado a %r para el usuario %s" - -#, python-format -#~ msgid "Going to try and terminate %s" -#~ msgstr "Se va a probar y terminar %s" - -#, python-format -#~ msgid "Casting to scheduler for %s/%s's instance %s" -#~ msgstr "Llamando al planificar para %s/%s insntancia %s" - -#, python-format -#~ msgid "Quota exceeeded for %s, tried to run %s instances" -#~ msgstr "Quota superada por %s, intentando lanzar %s instancias" - -#, python-format -#~ msgid "check_instance_lock: arguments: |%s| |%s| |%s|" -#~ msgstr "check_instance_lock: arguments: 
|%s| |%s| |%s|" - -#, python-format -#~ msgid "Input partition size not evenly divisible by sector size: %d / %d" -#~ msgstr "" -#~ "El tamaño de la partición de entrada no es divisible de forma uniforme por " -#~ "el tamaño del sector: %d / %d" - -#, python-format -#~ msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" -#~ msgstr "" -#~ "Los bytes del almacenamiento local no son divisibles de forma uniforme por " -#~ "el tamaño del sector: %d / %d" - -#, python-format -#~ msgid "volume %s: creating lv of size %sG" -#~ msgstr "volumen %s: creando lv de tamaño %sG" - -#, python-format -#~ msgid "Disassociating address %s" -#~ msgstr "Desasociando la dirección %s" - -#, python-format -#~ msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" -#~ msgstr "" -#~ "intentando reiniciar una instancia que no está en ejecución: %s (estado: %s " -#~ "esperado: %s)" - -#, python-format -#~ msgid "" -#~ "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" -#~ msgstr "" -#~ "intentando crear un snapshot de una instancia que no está en ejecución: %s " -#~ "(estado: %s esperado: %s)" - -#, python-format -#~ msgid "Detach volume %s from mountpoint %s on instance %s" -#~ msgstr "Desvinculando volumen %s del punto de montaje %s en la instancia %s" - #~ msgid "unexpected exception getting connection" #~ msgstr "excepción inexperada al obtener la conexión" #~ msgid "unexpected error during update" #~ msgstr "error inesperado durante la actualización" -#, python-format -#~ msgid "Cannot get blockstats for \"%s\" on \"%s\"" -#~ msgstr "No puedo obtener estadísticas del bloque para \"%s\" en \"%s\"" - #, python-format #~ msgid "updating %s..." #~ msgstr "actualizando %s..." @@ -3061,285 +2856,14 @@ msgstr "Eliminando el usuario %(user)s del proyecto %(project)s" #~ msgid "Found instance: %s" #~ msgstr "Encontrada interfaz: %s" -#, python-format -#~ msgid "Cannot get ifstats for \"%s\" on \"%s\"" -#~ msgstr "No puedo obtener estadísticas de la interfaz para \"%s\" en \"%s\"" - -#, python-format -#~ msgid "No instance for id %s" -#~ msgstr "No hay instancia con id %s" - -#, python-format -#~ msgid "no keypair for user %s, name %s" -#~ msgstr "no hay par de claves para el usuario %s, nombre %s" - -#, python-format -#~ msgid "No service for %s, %s" -#~ msgstr "No hay servicio para %s, %s" - -#, python-format -#~ msgid "No volume for id %s" -#~ msgstr "No hay volumen para el id %s" - -#, python-format -#~ msgid "No security group named %s for project: %s" -#~ msgstr "No hay un grupo de seguridad con nombre %s para el proyecto: %s" - -#, python-format -#~ msgid "Parallax returned HTTP error %d from request for /images/detail" -#~ msgstr "" -#~ "Parallax ha devuelto un error HTTP %d para la petición para /images/detail" - -#, python-format -#~ msgid "Parallax returned HTTP error %d from request for /images" -#~ msgstr "Parallax ha devuelto un error HTTP %d a la petición para /images" - -#, python-format -#~ msgid "IP %s leased to bad mac %s vs %s" -#~ msgstr "IP %s asociada a una mac incorrecta %s vs %s" - -#, python-format -#~ msgid "Unauthorized attempt to get object %s from bucket %s" -#~ msgstr "Intento no autorizado de obtener el objeto %s en el cubo %s" - -#, python-format -#~ msgid "Getting object: %s / %s" -#~ msgstr "Obteniendo objeto: %s / %s" - -#, python-format -#~ msgid "Putting object: %s / %s" -#~ msgstr "Colocando objeto: %s / %s" - -#, python-format -#~ msgid "Unauthorized attempt to upload object %s to bucket %s" -#~ msgstr "Intento no 
autorizado de subir el objeto %s al cubo %s" - -#, python-format -#~ msgid "Deleting object: %s / %s" -#~ msgstr "Eliminando objeto: %s / %s" - -#, python-format -#~ msgid "Toggling publicity flag of image %s %r" -#~ msgstr "Cambiando los atributos de publicidad de la imagen %s %r" - -#, python-format -#~ msgid "Creating disk for %s by attaching disk file %s" -#~ msgstr "" -#~ "Creando disco para %s a través de la asignación del fichero de disco %s" - -#, python-format -#~ msgid "WMI job succeeded: %s, Elapsed=%s " -#~ msgstr "Trabajo WMI ha tenido exito: %s, Transcurrido=%s " - -#, python-format -#~ msgid "Created switch port %s on switch %s" -#~ msgstr "Creado puerto %s en el switch %s" - -#, python-format -#~ msgid "instance %s: deleting instance files %s" -#~ msgstr "instancia %s: eliminando los ficheros de la instancia %s" - -#, python-format -#~ msgid "Finished retreving %s -- placed in %s" -#~ msgstr "Finalizada la obtención de %s -- coloado en %s" - -#, python-format -#~ msgid "Failed to change vm state of %s to %s" -#~ msgstr "Fallo al cambiar el estado de la vm de %s a %s" - -#, python-format -#~ msgid "Successfully changed vm state of %s to %s" -#~ msgstr "Cambio de estado de la vm con éxito de %s a %s" - -#, python-format -#~ msgid "" -#~ "Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -#~ "cpu_time=%s" -#~ msgstr "" -#~ "Obtenida información para vm %s: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" - -#, python-format -#~ msgid "instance %s: ignoring error injecting data into image %s (%s)" -#~ msgstr "" -#~ "instancia %s: ignorando el error al inyectar datos en la imagen %s (%s)" - -#, python-format -#~ msgid "Contents of file %s: %r" -#~ msgstr "Contenidos del fichero %s: %r" - -#, python-format -#~ msgid "instance %s: injecting net into image %s" -#~ msgstr "instancia %s: inyectando red en la imagen %s" - -#, python-format -#~ msgid "instance %s: injecting key into image %s" -#~ msgstr "instancia %s: inyectando clave en la imagen %s" - -#, python-format -#~ msgid "data: %r, fpath: %r" -#~ msgstr "datos: %r, fpath: %r" - -#, python-format -#~ msgid "Task [%s] %s status: %s %s" -#~ msgstr "Tarea [%s] %s estado: %s %s" - -#, python-format -#~ msgid "Task [%s] %s status: success %s" -#~ msgstr "Tarea [%s] %s estado: éxito %s" - -#, python-format -#~ msgid "Calling %s %s" -#~ msgstr "Llamando %s %s" - -#, python-format -#~ msgid "%s: _db_content => %s" -#~ msgstr "%s: _db_content => %s" - -#, python-format -#~ msgid "Created VBD %s for VM %s, VDI %s." -#~ msgstr "Creado VBD %s for VM %s, VDI %s." - -#, python-format -#~ msgid "Creating VBD for VM %s, VDI %s ... " -#~ msgstr "Creando VBD para VM %s, VDI %s... " - -#, python-format -#~ msgid "Created VIF %s for VM %s, network %s." -#~ msgstr "Creado VIF %s para VM %s, red %s." - -#, python-format -#~ msgid "Creating VIF for VM %s, network %s." -#~ msgstr "Creando VIF para VM %s, red %s." - -#, python-format -#~ msgid "Created VM %s as %s." 
-#~ msgstr "Creada VM %s cómo %s" - -#, python-format -#~ msgid "Asking xapi to upload %s as '%s'" -#~ msgstr "Solicitando a xapi la subida de %s cómo %s'" - -#, python-format -#~ msgid "VHD %s has parent %s" -#~ msgstr "VHD %s tiene cómo padre a %s" - -#, python-format -#~ msgid "Asking xapi to fetch %s as %s" -#~ msgstr "Solicitando a xapi obtener %s cómo %s" - #, python-format #~ msgid "PV Kernel in VDI:%d" #~ msgstr "PV Kernel en VDI:%d" -#, python-format -#~ msgid "Unexpected number of VDIs (%s) found for VM %s" -#~ msgstr "Número no esperado de VDIs (%s) encontrados para VM %s" - -#, python-format -#~ msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." -#~ msgstr "" -#~ "El padre %s no concuerda con el padre original %s, esperando la unión..." - -#, python-format -#~ msgid "suspend: instance not present %s" -#~ msgstr "suspendido: instancia no encontrada: %s" - -#, python-format -#~ msgid "Introduced %s as %s." -#~ msgstr "Introducido %s cómo %s." - -#, python-format -#~ msgid "resume: instance not present %s" -#~ msgstr "reanudar: instancia no encontrada %s" - -#, python-format -#~ msgid "Instance not found %s" -#~ msgstr "instancia no encontrada %s" - -#, python-format -#~ msgid "Ignoring exception %s when getting PBDs for %s" -#~ msgstr "Ignorando excepción %s al obtener PBDs de %s" - -#, python-format -#~ msgid "Unable to create VDI on SR %s for instance %s" -#~ msgstr "Inpoisble crear VDI en SR %s para la instancia %s" - -#, python-format -#~ msgid "Unable to obtain target information %s, %s" -#~ msgstr "Imposible obtener información del destino %s, %s" - -#, python-format -#~ msgid "Ignoring exception %s when forgetting SR %s" -#~ msgstr "Ignorando excepción %s al olvidar SR %s" - -#, python-format -#~ msgid "Ignoring exception %s when unplugging PBD %s" -#~ msgstr "Ignorando excepción %s al desconectar PBD %s" - -#, python-format -#~ msgid "Attach_volume: %s, %s, %s" -#~ msgstr "Attach_volume: %s, %s, %s" - -#, python-format -#~ msgid "Unable to use SR %s for instance %s" -#~ msgstr "Imposible utilizar SR %s para la instancia %s" - -#, python-format -#~ msgid "Mountpoint %s attached to instance %s" -#~ msgstr "Punto de montaje %s unido a la instancia %s" - -#, python-format -#~ msgid "Detach_volume: %s, %s" -#~ msgstr "Detach_volume: %s, %s" - -#, python-format -#~ msgid "Mountpoint %s detached from instance %s" -#~ msgstr "Punto d emontaje %s desasociado de la instancia %s" - -#, python-format -#~ msgid "Quota exceeeded for %s, tried to create %sG volume" -#~ msgstr "Quota excedida para %s, intentando crear el volumen %sG" - #, python-format #~ msgid "Volume quota exceeded. You cannot create a volume of size %s" #~ msgstr "Quota de volumen superada. No puedes crear un volumen de tamaño %s" -#, python-format -#~ msgid "instance %s: attach failed %s, removing" -#~ msgstr "instalación %s: asociación fallida %s, eliminando" - -#, python-format -#~ msgid "instance %s: attaching volume %s to %s" -#~ msgstr "instancia %s: asociando volumen %s a %s" - -#, python-format -#~ msgid "Snapshotting VM %s with label '%s'..." -#~ msgstr "Creando snapshot de la VM %s con la etiqueta '%s'..." - -#, python-format -#~ msgid "Created snapshot %s from VM %s." 
-#~ msgstr "Creando snapshot %s de la VM %s" - -#, python-format -#~ msgid "Unable to Snapshot %s: %s" -#~ msgstr "Incapaz de realizar snapshot %s: %s" - -#, python-format -#~ msgid "Adding sitewide role %s to user %s" -#~ msgstr "Añadiendo rol global %s al usuario %s" - -#, python-format -#~ msgid "Removing sitewide role %s from user %s" -#~ msgstr "Eliminando rol global %s del usuario %s" - -#, python-format -#~ msgid "Del: disk %s vm %s" -#~ msgstr "Del: disco %s vm %s" - -#, python-format -#~ msgid "Spawning VM %s created %s." -#~ msgstr "Iniciando VM %s creado %s." - #~ msgid "No such process" #~ msgstr "No existe el proceso" diff --git a/po/ja.po b/po/ja.po index 179302b55..8bc282ec0 100644 --- a/po/ja.po +++ b/po/ja.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-05-10 10:26+0000\n" -"Last-Translator: Akira YOSHIYAMA \n" +"PO-Revision-Date: 2011-08-23 11:22+0000\n" +"Last-Translator: Thierry Carrez \n" "Language-Team: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2840,44 +2840,6 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s ã«è¿½åŠ ã—ã¾ã™ msgid "Removing user %(user)s from project %(project)s" msgstr "ユーザ %(user)s をプロジェクト %(project)s ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" -#, python-format -#~ msgid "" -#~ "%s\n" -#~ "Command: %s\n" -#~ "Exit code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" -#~ msgstr "" -#~ "%s\n" -#~ "コマンド: %s\n" -#~ "終了コード: %s\n" -#~ "標準出力: %r\n" -#~ "標準エラー出力: %r" - -#, python-format -#~ msgid "(%s) publish (key: %s) %s" -#~ msgstr "(%s) パブリッシュ (key: %s) %s" - -#, python-format -#~ msgid "Binding %s to %s with key %s" -#~ msgstr "%s ã‚’ %s ã«ã‚­ãƒ¼ %s ã§ãƒã‚¤ãƒ³ãƒ‰ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Getting from %s: %s" -#~ msgstr "%s ã‹ã‚‰ %s ã‚’å–å¾—" - -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "AMQPサーム%s:%d ã«æŽ¥ç¶šã§ãã¾ã›ã‚“。 %d 秒後ã«å†åº¦è©¦ã¿ã¾ã™ã€‚" - -#, python-format -#~ msgid "Starting %s node" -#~ msgstr "ノード %s ã‚’é–‹å§‹ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Data store %s is unreachable. Trying again in %d seconds." -#~ msgstr "データストア %s ã«æŽ¥ç¶šã§ãã¾ã›ã‚“。 %d 秒後ã«å†æŽ¥ç¶šã—ã¾ã™ã€‚" - #, python-format #~ msgid "Serving %s" #~ msgstr "%s サービスã®é–‹å§‹" @@ -2889,166 +2851,6 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s ã‹ã‚‰å‰Šé™¤ã—ã¾ #~ msgid "pidfile %s does not exist. Daemon not running?\n" #~ msgstr "pidfile %s ãŒå­˜åœ¨ã—ã¾ã›ã‚“。デーモンã¯å®Ÿè¡Œä¸­ã§ã™ã‹ï¼Ÿ\n" -#, python-format -#~ msgid "Couldn't get IP, using 127.0.0.1 %s" -#~ msgstr "IPã‚’å–å¾—ã§ãã¾ã›ã‚“。127.0.0.1 ã‚’ %s ã¨ã—ã¦ä½¿ã„ã¾ã™ã€‚" - -#, python-format -#~ msgid "" -#~ "Access key %s has had %d failed authentications and will be locked out for " -#~ "%d minutes." 
-#~ msgstr "アクセスキー %s 㯠%d 回èªè¨¼ã«å¤±æ•—ã—ãŸãŸã‚ã€%d 分間ロックã•れã¾ã™ã€‚" - -#, python-format -#~ msgid "Authenticated Request For %s:%s)" -#~ msgstr "リクエストをèªè¨¼ã—ã¾ã—ãŸï¼š %s:%s" - -#, python-format -#~ msgid "arg: %s\t\tval: %s" -#~ msgstr "引数(arg): %s\t値(val): %s" - -#, python-format -#~ msgid "Unauthorized request for controller=%s and action=%s" -#~ msgstr "許å¯ã•れã¦ã„ãªã„リクエスト: controller=%s, action %sã§ã™ã€‚" - -#, python-format -#~ msgid "Adding role %s to user %s for project %s" -#~ msgstr "Adding role: ロール %s をユーザ %sã€ãƒ—ロジェクト %s ã«è¿½åŠ ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Adding sitewide role %s to user %s" -#~ msgstr "Adding sitewide role: サイトワイドã®ãƒ­ãƒ¼ãƒ« %s をユーザ %s ã«è¿½åŠ ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Removing role %s from user %s for project %s" -#~ msgstr "Removing role: ロール %s をユーザ %s プロジェクト %s ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Removing sitewide role %s from user %s" -#~ msgstr "Removing sitewide role: サイトワイドã®ãƒ­ãƒ¼ãƒ« %s をユーザ %s ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Getting x509 for user: %s on project: %s" -#~ msgstr "Getting X509: x509ã®å–得: ユーザ %s, プロジェクト %s" - -#, python-format -#~ msgid "Create project %s managed by %s" -#~ msgstr "Create project: プロジェクト %s (%s ã«ã‚ˆã‚Šç®¡ç†ã•れる)を作æˆã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Adding user %s to project %s" -#~ msgstr "Adding user: ユーザ %s をプロジェクト %s ã«è¿½åŠ ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Removing user %s from project %s" -#~ msgstr "Removing user: ユーザ %s をプロジェクト %s ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Unsupported API request: controller = %s,action = %s" -#~ msgstr "サãƒãƒ¼ãƒˆã•れã¦ã„ãªã„APIリクエストã§ã™ã€‚ controller = %s,action = %s" - -#, python-format -#~ msgid "Attach volume %s to instacne %s at %s" -#~ msgstr "Attach volume: ボリューム%s をインスタンス %s ã«ãƒ‡ãƒã‚¤ã‚¹ %s ã§ã‚¢ã‚¿ãƒƒãƒã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Associate address %s to instance %s" -#~ msgstr "Associate address: アドレス %s をインスタンス %s ã«é–¢é€£ä»˜ã‘ã¾ã™ã€‚" - -#, python-format -#~ msgid "Registered image %s with id %s" -#~ msgstr "Registered image: イメージ %s ã‚’id %s ã§ç™»éŒ²ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "User %s is already a member of the group %s" -#~ msgstr "ユーザ %s ã¯æ—¢ã«ã‚°ãƒ«ãƒ¼ãƒ— %s ã®ãƒ¡ãƒ³ãƒãƒ¼ã§ã™ã€‚" - -#, python-format -#~ msgid "failed authorization: no project named %s (user=%s)" -#~ msgstr "Failed authorization: èªè¨¼ã«å¤±æ•—ã—ã¾ã—ãŸã€‚プロジェクトå %s (ユーザ = %s) ã¯å­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "Failed authorization: user %s not admin and not member of project %s" -#~ msgstr "" -#~ "Failed authorization: èªè¨¼ã«å¤±æ•—ã—ã¾ã—ãŸï¼š ユーザ %s ã¯ç®¡ç†è€…ã§ã¯ãªãã‹ã¤ãƒ—ロジェクト %s ã®ãƒ¡ãƒ³ãƒãƒ¼ã§ã¯ã‚りã¾ã›ã‚“。" - -#, python-format -#~ msgid "User %s is not a member of project %s" -#~ msgstr "ユーザ %s ã¯ãƒ—ロジェクト %s ã®ãƒ¡ãƒ³ãƒãƒ¼ã§ã¯ã‚りã¾ã›ã‚“。" - -#, python-format -#~ msgid "Adding role %s to user %s in project %s" -#~ msgstr "Adding role: ロール %s をユーザ %s (プロジェクト %s ã®ï¼‰ ã«è¿½åŠ ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Removing role %s from user %s on project %s" -#~ msgstr "Removing role: ロール %s をユーザ %s (プロジェクト %s ã®ï¼‰ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Created project %s with manager %s" -#~ msgstr "Created project: プロジェクト %s (マãƒãƒ¼ã‚¸ãƒ£ %s)を作æˆã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Remove user %s from project %s" -#~ msgstr "Remove user: ユーザ %s をプロジェクト %s ã‹ã‚‰å‰Šé™¤ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Created user %s (admin: %r)" -#~ msgstr "Created user: ユーザ %s (admin: %r) を作æˆã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Admin 
status set to %r for user %s" -#~ msgstr "Admin status set: 管ç†è€…ステータス %r をユーザ %s ã«è¨­å®šã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Quota exceeeded for %s, tried to run %s instances" -#~ msgstr "%s ã®ã‚¯ã‚ªãƒ¼ã‚¿ä¸Šé™ã‚’è¶…ãˆã¾ã—ãŸã€‚%s インスタンスを実行ã—よã†ã¨ã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Casting to scheduler for %s/%s's instance %s" -#~ msgstr "スケジューラã«å¯¾ã—㦠%s/%s ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ %s ã‚’é€ä¿¡ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Going to try and terminate %s" -#~ msgstr "%s を終了ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Input partition size not evenly divisible by sector size: %d / %d" -#~ msgstr "インプットパーティションサイズãŒã‚»ã‚¯ã‚¿ãƒ¼ã‚µã‚¤ã‚ºã§å‰²ã‚Šåˆ‡ã‚Œã¾ã›ã‚“。 %d / %d" - -#, python-format -#~ msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" -#~ msgstr "ローカルストレージã®ãƒã‚¤ãƒˆæ•°ãŒã‚»ã‚¯ã‚¿ãƒ¼ã‚µã‚¤ã‚ºã§å‰²ã‚Šåˆ‡ã‚Œã¾ã›ã‚“: %d / %d" - -#, python-format -#~ msgid "check_instance_lock: arguments: |%s| |%s| |%s|" -#~ msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" - -#, python-format -#~ msgid "Disassociating address %s" -#~ msgstr "アドレス %s ã®é–¢é€£ä»˜ã‘を解除(disassociate)ã—ã¦ã„ã¾ã™ã€‚" - -#, python-format -#~ msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" -#~ msgstr "実行ã—ã¦ã„ãªã„インスタンスã®å†èµ·å‹•を試ã¿ã¾ã™ã€‚%s (状態: %s 期待ã™ã‚‹çŠ¶æ…‹: %s)" - -#, python-format -#~ msgid "" -#~ "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" -#~ msgstr "実行ã—ã¦ã„ãªã„インスタンスã®ã‚¹ãƒŠãƒƒãƒ—ショットå–得を試ã¿ã¾ã™ã€‚%s (状態: %s 期待ã™ã‚‹çŠ¶æ…‹: %s)" - -#, python-format -#~ msgid "instance %s: attaching volume %s to %s" -#~ msgstr "attaching volume: インスタンス %s ã«ã¤ã„ã¦ãƒœãƒªãƒ¥ãƒ¼ãƒ  %s ã‚’ %s ã«ã‚¢ã‚¿ãƒƒãƒã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "instance %s: attach failed %s, removing" -#~ msgstr "インスタンス %s: %sã®ã‚¢ã‚¿ãƒƒãƒã«å¤±æ•—ã—ã¾ã—ãŸã€‚リムーブã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Detach volume %s from mountpoint %s on instance %s" -#~ msgstr "Detach volume: ボリューム %s をマウントãƒã‚¤ãƒ³ãƒˆ %s (インスタンス%s)ã‹ã‚‰ãƒ‡ã‚¿ãƒƒãƒã—ã¾ã™ã€‚" - #, python-format #~ msgid "updating %s..." 
#~ msgstr "%s ã®æƒ…å ±ã®æ›´æ–°â€¦" @@ -3056,14 +2858,6 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s ã‹ã‚‰å‰Šé™¤ã—ã¾ #~ msgid "unexpected error during update" #~ msgstr "æ›´æ–°ã®æœ€ä¸­ã«äºˆæœŸã—ãªã„エラーãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" -#, python-format -#~ msgid "Cannot get blockstats for \"%s\" on \"%s\"" -#~ msgstr "ブロックデãƒã‚¤ã‚¹ \"%s\" ã®çµ±è¨ˆã‚’ \"%s\" ã«ã¤ã„ã¦å–å¾—ã§ãã¾ã›ã‚“。" - -#, python-format -#~ msgid "Cannot get ifstats for \"%s\" on \"%s\"" -#~ msgstr "インタフェース \"%s\" ã®çµ±è¨ˆã‚’ \"%s\" ã«ã¤ã„ã¦å–å¾—ã§ãã¾ã›ã‚“。" - #~ msgid "unexpected exception getting connection" #~ msgstr "接続ã«éš›ã—予期ã—ãªã„エラーãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" @@ -3071,279 +2865,14 @@ msgstr "ユーザ %(user)s をプロジェクト %(project)s ã‹ã‚‰å‰Šé™¤ã—ã¾ #~ msgid "Found instance: %s" #~ msgstr "インスタンス %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚" -#, python-format -#~ msgid "No service for %s, %s" -#~ msgstr "%s, %s ã®serviceãŒå­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "No instance for id %s" -#~ msgstr "id %s ã®instanceãŒå­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "no keypair for user %s, name %s" -#~ msgstr "ユーザ %s, ãƒãƒ¼ãƒ %s ã«è©²å½“ã™ã‚‹ã‚­ãƒ¼ãƒšã‚¢ãŒå­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "No volume for id %s" -#~ msgstr "id %s ã«è©²å½“ã™ã‚‹ãƒœãƒªãƒ¥ãƒ¼ãƒ ãŒå­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "No security group named %s for project: %s" -#~ msgstr "セキュリティグループå %s ãŒãƒ—ロジェクト %s ã«å­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "Parallax returned HTTP error %d from request for /images" -#~ msgstr "Parallax ãŒHTTPエラー%d ã‚’ /images ã«å¯¾ã™ã‚‹ãƒªã‚¯ã‚¨ã‚¹ãƒˆã«å¯¾ã—ã¦è¿”ã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Parallax returned HTTP error %d from request for /images/detail" -#~ msgstr "Parallax ãŒHTTPエラー %d ã‚’ /images/detail ã«å¯¾ã™ã‚‹ãƒªã‚¯ã‚¨ã‚¹ãƒˆã«å¯¾ã—ã¦è¿”ã—ã¾ã—ãŸ" - -#, python-format -#~ msgid "IP %s leased to bad mac %s vs %s" -#~ msgstr "IP %s ãŒæœŸå¾…ã—㟠mac %s ã§ã¯ãªã %s ã«ãƒªãƒ¼ã‚¹ã•れã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "IP %s released from bad mac %s vs %s" -#~ msgstr "IP %s ãŒmac %s ã§ã¯ãªã„ mac %s ã¸ã®å‰²å½“ã‹ã‚‰é–‹æ”¾ã•れã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Getting object: %s / %s" -#~ msgstr "オブジェクトã®å–得: %s / %s" - -#, python-format -#~ msgid "Unauthorized attempt to get object %s from bucket %s" -#~ msgstr "" -#~ "Unauthorized attempt to get object: オブジェクト %s ã®ãƒã‚±ãƒƒãƒˆ %s ã‹ã‚‰ã®å–å¾—ã¯è¨±å¯ã•れã¦ã„ã¾ã›ã‚“。" - -#, python-format -#~ msgid "Putting object: %s / %s" -#~ msgstr "ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆã®æ ¼ç´ï¼š: %s / %s" - -#, python-format -#~ msgid "Unauthorized attempt to upload object %s to bucket %s" -#~ msgstr "" -#~ "Unauthorized attempt to upload: オブジェクト %s ã®ãƒã‚±ãƒƒãƒˆ %s ã¸ã®ã‚¢ãƒƒãƒ—ロードã¯è¨±å¯ã•れã¦ã„ã¾ã›ã‚“。" - -#, python-format -#~ msgid "Deleting object: %s / %s" -#~ msgstr "オブジェクトを削除ã—ã¦ã„ã¾ã™ã€‚: %s / %s" - -#, python-format -#~ msgid "Toggling publicity flag of image %s %r" -#~ msgstr "Toggling publicity flag: イメージ %s ã®å…¬é–‹ãƒ•ラグを %r ã«æ›´æ–°ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Casting to %s %s for %s" -#~ msgstr "メッセージã®cast: %s %s for %s" - -#, python-format -#~ msgid "Nested received %s, %s" -#~ msgstr "ãƒã‚¹ã¨ã—ãŸå—ä¿¡: %s, %s" - -#, python-format -#~ msgid "Creating disk for %s by attaching disk file %s" -#~ msgstr "%s ã®ãƒ‡ã‚£ã‚¹ã‚¯ã‚’ディスクファイル %s をアタッãƒã—ã¦ä½œæˆã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Created switch port %s on switch %s" -#~ msgstr "スイッãƒãƒãƒ¼ãƒˆ %s をスイッム%s ã«ä½œæˆã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "WMI job succeeded: %s, Elapsed=%s " -#~ msgstr "WMIã‚¸ãƒ§ãƒ–ãŒæˆåŠŸã—ã¾ã—ãŸ: %s, çµŒéŽæ™‚é–“=%s " - -#, python-format -#~ msgid "Del: disk %s vm %s" -#~ msgstr "Del: 
削除: disk %s vm %s" - -#, python-format -#~ msgid "" -#~ "Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -#~ "cpu_time=%s" -#~ msgstr "" -#~ "vm %s ã®æƒ…å ±ã®å–å¾—: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" - -#, python-format -#~ msgid "Successfully changed vm state of %s to %s" -#~ msgstr "vmã®çŠ¶æ…‹ã® %s ã‹ã‚‰ %s ã¸ã®å¤‰æ›´ã«æˆåŠŸã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Failed to change vm state of %s to %s" -#~ msgstr "VMã®çŠ¶æ…‹ã® %s ã‹ã‚‰ %s ã¸ã®å¤‰æ›´ã«å¤±æ•—ã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Finished retreving %s -- placed in %s" -#~ msgstr "%s ã‚’å–å¾—ã—ã¾ã—ãŸã€‚æ ¼ç´å…ˆ: %s" - -#, python-format -#~ msgid "instance %s: deleting instance files %s" -#~ msgstr "インスタンス %s: インスタンスファイル %s を削除ã—ã¦ã„ã¾ã™ã€‚" - -#, python-format -#~ msgid "data: %r, fpath: %r" -#~ msgstr "データ:%r ファイルパス: %r" - -#, python-format -#~ msgid "Contents of file %s: %r" -#~ msgstr "ファイル %s ã®ä¸­èº«: %r" - -#, python-format -#~ msgid "instance %s: injecting key into image %s" -#~ msgstr "インスタンス %s ã«ã‚­ãƒ¼ %s をインジェクトã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "instance %s: injecting net into image %s" -#~ msgstr "インスタンス %s ã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯è¨­å®šã‚’イメージ %s ã«ã‚¤ãƒ³ã‚¸ã‚§ã‚¯ãƒˆã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "instance %s: ignoring error injecting data into image %s (%s)" -#~ msgstr "インスタンス %s: データをイメージ %s ã«ã‚¤ãƒ³ã‚¸ã‚§ã‚¯ãƒˆã™ã‚‹éš›ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚(%s)" - -#, python-format -#~ msgid "Task [%s] %s status: success %s" -#~ msgstr "タスク [%s] %s ステータス: success %s" - -#, python-format -#~ msgid "Task [%s] %s status: %s %s" -#~ msgstr "タスク [%s] %s ステータス: %s %s" - -#, python-format -#~ msgid "%s: _db_content => %s" -#~ msgstr "%s: _db_content => %s" - -#, python-format -#~ msgid "Calling %s %s" -#~ msgstr "呼ã³å‡ºã—: %s %s" - -#, python-format -#~ msgid "Created VM %s as %s." -#~ msgstr "VM %s ã‚’ %s ã¨ã—ã¦ä½œæˆã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Creating VBD for VM %s, VDI %s ... " -#~ msgstr "VM %s, VDI %s ã®VBDを作æˆã—ã¾ã™â€¦ " - -#, python-format -#~ msgid "Created VBD %s for VM %s, VDI %s." -#~ msgstr "VBD %s ã‚’ VM %s, VDI %s ã«å¯¾ã—ã¦ä½œæˆã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Creating VIF for VM %s, network %s." -#~ msgstr "VM %s, ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ %s を作æˆã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Created VIF %s for VM %s, network %s." -#~ msgstr "VIF %s ã‚’ VM %s, ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ %s ã«ä½œæˆã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Snapshotting VM %s with label '%s'..." -#~ msgstr "VM %s ã®ã‚¹ãƒŠãƒƒãƒ—ショットをラベル '%s' ã§ä½œæˆã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Created snapshot %s from VM %s." -#~ msgstr "スナップショット %s ã‚’ VM %s ã«ã¤ã„ã¦ä½œæˆã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Asking xapi to upload %s as '%s'" -#~ msgstr "xapiã«å¯¾ã—㦠%s ã‚’ '%s' ã¨ã—ã¦ã‚¢ãƒƒãƒ—ロードã™ã‚‹ã‚ˆã†ã«æŒ‡ç¤ºã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Asking xapi to fetch %s as %s" -#~ msgstr "xapi ã«å¯¾ã—㦠%s ã‚’ %s ã¨ã—ã¦å–å¾—ã™ã‚‹ã‚ˆã†ã«æŒ‡ç¤ºã—ã¾ã™ã€‚" - #, python-format #~ msgid "PV Kernel in VDI:%d" #~ msgstr "VDIã®PV Kernel: %d" -#, python-format -#~ msgid "VHD %s has parent %s" -#~ msgstr "VHD %s ã®ãƒšã‚¢ãƒ¬ãƒ³ãƒˆã¯ %s ã§ã™ã€‚" - -#, python-format -#~ msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." -#~ msgstr "ペアレント %s ãŒã‚ªãƒªã‚¸ãƒŠãƒ«ã®ãƒšã‚¢ãƒ¬ãƒ³ãƒˆ %s ã¨ä¸€è‡´ã—ã¾ã›ã‚“。åˆè‡´ã™ã‚‹ã®ã‚’å¾…ã¡ã¾ã™â€¦" - -#, python-format -#~ msgid "Unexpected number of VDIs (%s) found for VM %s" -#~ msgstr "予期ã—ãªã„æ•° (%s) ã®VDIãŒVM %s ã«å­˜åœ¨ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Spawning VM %s created %s." 
-#~ msgstr "VM %s ã®ç”Ÿæˆ(spawning) ã«ã‚ˆã‚Š %s を作æˆã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Unable to Snapshot %s: %s" -#~ msgstr "%s ã®ã‚¹ãƒŠãƒƒãƒ—ショットã«å¤±æ•—ã—ã¾ã—ãŸï¼š %s" - -#, python-format -#~ msgid "suspend: instance not present %s" -#~ msgstr "suspend: インスタンス %s ã¯å­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "resume: instance not present %s" -#~ msgstr "resume: インスタンス %s ã¯å­˜åœ¨ã—ã¾ã›ã‚“。" - -#, python-format -#~ msgid "Instance not found %s" -#~ msgstr "インスタンス %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" - -#, python-format -#~ msgid "Introduced %s as %s." -#~ msgstr "%s ã‚’ %s ã¨ã—㦠introduce ã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Ignoring exception %s when getting PBDs for %s" -#~ msgstr "例外 %s ㌠%s ã®PBDã‚’å–å¾—ã™ã‚‹éš›ã«ç™ºç”Ÿã—ã¾ã—ãŸãŒç„¡è¦–ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Ignoring exception %s when unplugging PBD %s" -#~ msgstr "例外 %s ㌠%s ã®PBDã‚’unplugã™ã‚‹éš›ã«ç™ºç”Ÿã—ã¾ã—ãŸãŒç„¡è¦–ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Ignoring exception %s when forgetting SR %s" -#~ msgstr "例外 %s ãŒSR %s ã‚’forgetã™ã‚‹éš›ã«ç™ºç”Ÿã—ã¾ã—ãŸãŒç„¡è¦–ã—ã¾ã™ã€‚" - -#, python-format -#~ msgid "Unable to obtain target information %s, %s" -#~ msgstr "ã‚¿ãƒ¼ã‚²ãƒƒãƒˆã®æƒ…報をå–å¾—ã§ãã¾ã›ã‚“。 %s, %s" - -#, python-format -#~ msgid "Attach_volume: %s, %s, %s" -#~ msgstr "Attach_volume: ボリュームã®ã‚¢ã‚¿ãƒƒãƒ: %s, %s, %s" - -#, python-format -#~ msgid "Unable to create VDI on SR %s for instance %s" -#~ msgstr "SR %s ã«ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ %s ã®VDIを作æˆã§ãã¾ã›ã‚“。" - -#, python-format -#~ msgid "Unable to use SR %s for instance %s" -#~ msgstr "SR %s をインスタンス %s ã«å¯¾ã—ã¦åˆ©ç”¨ã§ãã¾ã›ã‚“。" - -#, python-format -#~ msgid "Mountpoint %s attached to instance %s" -#~ msgstr "マウントãƒã‚¤ãƒ³ãƒˆ %s をインスタンス %s ã«ã‚¢ã‚¿ãƒƒãƒã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Detach_volume: %s, %s" -#~ msgstr "Detach_volume: ボリュームã®ãƒ‡ã‚¿ãƒƒãƒ: %s, %s" - -#, python-format -#~ msgid "Mountpoint %s detached from instance %s" -#~ msgstr "マウントãƒã‚¤ãƒ³ãƒˆ %s をインスタンス %s ã‹ã‚‰ãƒ‡ã‚¿ãƒƒãƒã—ã¾ã—ãŸã€‚" - -#, python-format -#~ msgid "Quota exceeeded for %s, tried to create %sG volume" -#~ msgstr "%sã®ã‚¯ã‚ªãƒ¼ã‚¿ã‚’è¶…ãˆã¦ã„ã¾ã™ã€‚サイズ %sG ã®ãƒœãƒªãƒ¥ãƒ¼ãƒ ã®ä½œæˆã‚’行ãŠã†ã¨ã—ã¾ã—ãŸã€‚" - #, python-format #~ msgid "Volume quota exceeded. You cannot create a volume of size %s" #~ msgstr "ボリュームã®ã‚¯ã‚ªãƒ¼ã‚¿ã‚’è¶…ãˆã¦ã„ã¾ã™ã€‚%sã®å¤§ãã•ã®ãƒœãƒªãƒ¥ãƒ¼ãƒ ã¯ä½œæˆã§ãã¾ã›ã‚“。" -#, python-format -#~ msgid "volume %s: creating lv of size %sG" -#~ msgstr "ボリューム%sã®%sGã®lv (è«–ç†ãƒœãƒªãƒ¥ãƒ¼ãƒ ) を作æˆã—ã¾ã™ã€‚" - #~ msgid "Wrong number of arguments." 
#~ msgstr "å¼•æ•°ã®æ•°ãŒç•°ãªã‚Šã¾ã™ã€‚" diff --git a/po/pt_BR.po b/po/pt_BR.po index d6d57a9b1..bff1135c6 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-07-25 17:40+0000\n" +"PO-Revision-Date: 2011-08-23 11:23+0000\n" "Last-Translator: msinhore \n" "Language-Team: Brazilian Portuguese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2824,47 +2824,6 @@ msgstr "" msgid "Removing user %(user)s from project %(project)s" msgstr "" -#, python-format -#~ msgid "" -#~ "%s\n" -#~ "Command: %s\n" -#~ "Exit code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" -#~ msgstr "" -#~ "%s\n" -#~ "Comando: %s\n" -#~ "Código de retorno: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" - -#, python-format -#~ msgid "(%s) publish (key: %s) %s" -#~ msgstr "(%s) publicar (key: %s) %s" - -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "" -#~ "Servidor AMQP em %s:%d inatingível. Tentando novamente em %d segundos." - -#, python-format -#~ msgid "Binding %s to %s with key %s" -#~ msgstr "Atribuindo %s para %s com chave %s" - -#, python-format -#~ msgid "Getting from %s: %s" -#~ msgstr "Obtendo de %s: %s" - -#, python-format -#~ msgid "Starting %s node" -#~ msgstr "Iniciando nó %s" - -#, python-format -#~ msgid "Data store %s is unreachable. Trying again in %d seconds." -#~ msgstr "" -#~ "Repositório de dados %s não pode ser atingido. Tentando novamente em %d " -#~ "segundos." - #~ msgid "Full set of FLAGS:" #~ msgstr "Conjunto completo de FLAGS:" @@ -2876,115 +2835,6 @@ msgstr "" #~ msgid "Serving %s" #~ msgstr "Servindo %s" -#, python-format -#~ msgid "Couldn't get IP, using 127.0.0.1 %s" -#~ msgstr "Não foi possível obter IP, usando 127.0.0.1 %s" - -#, python-format -#~ msgid "" -#~ "Access key %s has had %d failed authentications and will be locked out for " -#~ "%d minutes." -#~ msgstr "" -#~ "Chave de acesso %s tem %d falhas de autenticação e vai ser bloqueada por %d " -#~ "minutos." 
- -#, python-format -#~ msgid "arg: %s\t\tval: %s" -#~ msgstr "argumento: %s\t\tvalor: %s" - -#, python-format -#~ msgid "Authenticated Request For %s:%s)" -#~ msgstr "Pedido de Autenticação Para: %s:%s" - -#, python-format -#~ msgid "Adding sitewide role %s to user %s" -#~ msgstr "Adicionando papel em todo site %s ao usuário %s" - -#, python-format -#~ msgid "Adding role %s to user %s for project %s" -#~ msgstr "Adicionando papel %s ao usuário %s para o projeto %s" - -#, python-format -#~ msgid "Unauthorized request for controller=%s and action=%s" -#~ msgstr "Requisição não autorizada para controlador=%s e ação=%s" - -#, python-format -#~ msgid "Removing role %s from user %s for project %s" -#~ msgstr "Removendo papel %s do usuário %s para o projeto %s" - -#, python-format -#~ msgid "Getting x509 for user: %s on project: %s" -#~ msgstr "Obtendo x509 para usuário: %s do projeto: %s" - -#, python-format -#~ msgid "Create project %s managed by %s" -#~ msgstr "Criar projeto %s gerenciado por %s" - -#, python-format -#~ msgid "Removing user %s from project %s" -#~ msgstr "Excluindo usuário %s do projeto %s" - -#, python-format -#~ msgid "Adding user %s to project %s" -#~ msgstr "Adicionando usuário %s ao projeto %s" - -#, python-format -#~ msgid "Unsupported API request: controller = %s,action = %s" -#~ msgstr "Requisição de API não suportada: controlador = %s,ação = %s" - -#, python-format -#~ msgid "Removing sitewide role %s from user %s" -#~ msgstr "Removendo papel %s em todo site do usuário %s" - -#, python-format -#~ msgid "Associate address %s to instance %s" -#~ msgstr "Atribuir endereço %s à instância %s" - -#, python-format -#~ msgid "Attach volume %s to instacne %s at %s" -#~ msgstr "Anexar volume %s para instância %s em %s" - -#, python-format -#~ msgid "Registered image %s with id %s" -#~ msgstr "Registrada imagem %s com id %s" - -#, python-format -#~ msgid "User %s is already a member of the group %s" -#~ msgstr "Usuário %s já pertence ao grupo %s" - -#, python-format -#~ msgid "User %s is not a member of project %s" -#~ msgstr "Usuário %s não é membro do projeto %s" - -#, python-format -#~ msgid "failed authorization: no project named %s (user=%s)" -#~ msgstr "falha de autorização: nenhum projeto de nome %s (usuário=%s)" - -#, python-format -#~ msgid "Failed authorization: user %s not admin and not member of project %s" -#~ msgstr "" -#~ "Falha de autorização: usuário %s não é administrador nem membro do projeto %s" - -#, python-format -#~ msgid "Created project %s with manager %s" -#~ msgstr "Criado projeto %s com gerente %s" - -#, python-format -#~ msgid "Removing role %s from user %s on project %s" -#~ msgstr "Removendo papel %s do usuário %s no projeto %s" - -#, python-format -#~ msgid "Adding role %s to user %s in project %s" -#~ msgstr "Adicionando papel %s ao usuário %s no projeto %s" - -#, python-format -#~ msgid "Remove user %s from project %s" -#~ msgstr "Remover usuário %s do projeto %s" - -#, python-format -#~ msgid "Created user %s (admin: %r)" -#~ msgstr "Criado usuário %s (administrador: %r)" - #~ msgid "No such process" #~ msgstr "Processo inexistente" diff --git a/po/ru.po b/po/ru.po index 746db964a..951571d9e 100644 --- a/po/ru.po +++ b/po/ru.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-07-09 07:20+0000\n" -"Last-Translator: ilya kislicyn \n" +"PO-Revision-Date: 2011-08-23 11:22+0000\n" +"Last-Translator: Thierry Carrez \n" "Language-Team: 
Russian \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2793,58 +2793,6 @@ msgstr "" #~ msgid "Starting %s" #~ msgstr "ЗапуÑкаетÑÑ %s" -#, python-format -#~ msgid "arg: %s\t\tval: %s" -#~ msgstr "arg: %s\t\tval: %s" - -#, python-format -#~ msgid "Adding role %s to user %s for project %s" -#~ msgstr "Добавление роли %s Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s Ð´Ð»Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð° %s" - -#, python-format -#~ msgid "Removing role %s from user %s for project %s" -#~ msgstr "Удаление роли %s Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s Ð´Ð»Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð° %s" - -#, python-format -#~ msgid "Create project %s managed by %s" -#~ msgstr "Создать проект %s под управлением %s" - -#, python-format -#~ msgid "Removing user %s from project %s" -#~ msgstr "Удаление Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð° %s" - -#, python-format -#~ msgid "Adding user %s to project %s" -#~ msgstr "Добавление Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s к проекту %s" - -#, python-format -#~ msgid "User %s is already a member of the group %s" -#~ msgstr "Пользователь %s уже член группы %s" - -#, python-format -#~ msgid "User %s is not a member of project %s" -#~ msgstr "Пользователь %s не ÑвлÑетÑÑ Ñ‡Ð»ÐµÐ½Ð¾Ð¼ группы %s" - -#, python-format -#~ msgid "Created project %s with manager %s" -#~ msgstr "Создан проект %s под управлением %s" - -#, python-format -#~ msgid "Removing role %s from user %s on project %s" -#~ msgstr "Удаление роли %s Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s в проекте %s" - -#, python-format -#~ msgid "Remove user %s from project %s" -#~ msgstr "Удалить Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s из проекта %s" - -#, python-format -#~ msgid "Created user %s (admin: %r)" -#~ msgstr "Создан пользователь %s (админиÑтратор: %r)" - -#, python-format -#~ msgid "Adding role %s to user %s in project %s" -#~ msgstr "Добавление роли %s Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s в проект %s" - #~ msgid "unexpected error during update" #~ msgstr "Ð½ÐµÐ¾Ð¶Ð¸Ð´Ð°Ð½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° во Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ" @@ -2852,75 +2800,9 @@ msgstr "" #~ msgid "updating %s..." #~ msgstr "обновление %s..." -#, python-format -#~ msgid "Getting object: %s / %s" -#~ msgstr "Получение объекта: %s / %s" - -#, python-format -#~ msgid "Deleting object: %s / %s" -#~ msgstr "Удаление объекта: %s / %s" - -#, python-format -#~ msgid "%s: _db_content => %s" -#~ msgstr "%s: _db_content => %s" - -#, python-format -#~ msgid "Calling %s %s" -#~ msgstr "Звонок %s %s" - -#, python-format -#~ msgid "" -#~ "%s\n" -#~ "Command: %s\n" -#~ "Exit code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" -#~ msgstr "" -#~ "%s\n" -#~ "Команда: %s\n" -#~ "Код завершениÑ: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" - -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "AMQP Ñервер %s:%d недоÑтупен. ÐŸÐ¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° через %d Ñекунд." - -#, python-format -#~ msgid "Putting object: %s / %s" -#~ msgstr "Ð’Ñтавка объекта: %s / %s" - -#, python-format -#~ msgid "Starting %s node" -#~ msgstr "ЗапуÑкаетÑÑ Ð½Ð¾Ð´Ð° %s" - -#, python-format -#~ msgid "Data store %s is unreachable. Trying again in %d seconds." -#~ msgstr "Хранилище данных %s недоÑтупно. ÐŸÐ¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° через %d Ñекунд." 
- -#, python-format -#~ msgid "Couldn't get IP, using 127.0.0.1 %s" -#~ msgstr "Ðе удалоÑÑŒ получить IP, иÑпользуем 127.0.0.1 %s" - #, python-format #~ msgid "pidfile %s does not exist. Daemon not running?\n" #~ msgstr "pidfile %s не обнаружен. Демон не запущен?\n" -#, python-format -#~ msgid "Getting from %s: %s" -#~ msgstr "Получение из %s: %s" - -#, python-format -#~ msgid "" -#~ "Access key %s has had %d failed authentications and will be locked out for " -#~ "%d minutes." -#~ msgstr "" -#~ "Ключ доÑтупа %s имеет %d неудачных попыток аутентификации и будет " -#~ "заблокирован на %d минут." - -#, python-format -#~ msgid "Authenticated Request For %s:%s)" -#~ msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ Ð°ÑƒÑ‚ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ð¸ Ð´Ð»Ñ %s:%s)" - #~ msgid "Wrong number of arguments." #~ msgstr "Ðеверное чиÑло аргументов." diff --git a/po/tl.po b/po/tl.po index 84e9d26e6..3c7cd792f 100644 --- a/po/tl.po +++ b/po/tl.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-02-17 03:24+0000\n" -"Last-Translator: John Michael Baterna \n" +"PO-Revision-Date: 2011-08-23 11:21+0000\n" +"Last-Translator: Thierry Carrez \n" "Language-Team: Tagalog \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2789,8 +2789,3 @@ msgstr "" #, python-format msgid "Removing user %(user)s from project %(project)s" msgstr "" - -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "" -#~ "Hindi makita o maabot ang AMQP server sa %s:%d. Muling subukan sa %d segundo." diff --git a/po/uk.po b/po/uk.po index bcc53fed3..d040eaf69 100644 --- a/po/uk.po +++ b/po/uk.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-02-03 22:02+0000\n" -"Last-Translator: Wladimir Rossinski \n" +"PO-Revision-Date: 2011-08-23 11:21+0000\n" +"Last-Translator: Thierry Carrez \n" "Language-Team: Ukrainian \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-03 04:44+0000\n" -"X-Generator: Launchpad (build 13573)\n" +"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" +"X-Generator: Launchpad (build 13697)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -2788,10 +2788,6 @@ msgstr "" msgid "Removing user %(user)s from project %(project)s" msgstr "" -#, python-format -#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -#~ msgstr "AMQP Ñервер %s:%d недоÑтупний. Спроба під'Ñ”Ð´Ð½Ð°Ð½Ð½Ñ Ñ‡ÐµÑ€ÐµÐ· %d Ñекунд." 
- #, python-format #~ msgid "Starting %s" #~ msgstr "Запускається %s" @@ -2799,33 +2795,3 @@ msgstr "" #, python-format #~ msgid "Serving %s" #~ msgstr "Обслуговування %s" - -#, python-format -#~ msgid "Couldn't get IP, using 127.0.0.1 %s" -#~ msgstr "Не вдалось отримати IP, використовуючи 127.0.0.1 %s" - -#, python-format -#~ msgid "Removing user %s from project %s" -#~ msgstr "Вилучення користувача %s з проекту %s" - -#, python-format -#~ msgid "Adding user %s to project %s" -#~ msgstr "Долучення користувача %s до проекту %s" - -#, python-format -#~ msgid "" -#~ "%s\n" -#~ "Command: %s\n" -#~ "Exit code: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" -#~ msgstr "" -#~ "%s\n" -#~ "Команда: %s\n" -#~ "Код завершення: %s\n" -#~ "Stdout: %r\n" -#~ "Stderr: %r" - -#, python-format -#~ msgid "Getting from %s: %s" -#~ msgstr "Отримання з %s: %s" -- cgit From 3058e5eeb5e1068b7b5f721b985c735bc5486c92 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Wed, 24 Aug 2011 02:11:56 -0700 Subject: Couple of fixes to the review feedback changes --- nova/network/quantum/client.py | 12 ++++++------ nova/network/quantum/manager.py | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index 1927015c2..b457190c1 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -23,11 +23,11 @@ import urllib class JSONSerializer(object): -""" -This is a simple json-only serializer to use until we can just grab -the standard serializer from the quantum library. -TODO(danwent): replace serializer with quantum implementation -""" + """ + This is a simple json-only serializer to use until we can just grab + the standard serializer from the quantum library.
+ TODO(danwent): replace serializer with quantum implementation + """ def serialize(self, data, content_type): try: return json.dumps(data) @@ -172,7 +172,7 @@ class Client(object): httplib.CREATED, httplib.ACCEPTED, httplib.NO_CONTENT): - if not data: + if data is not None and len(data): return self.deserialize(data, status_code) else: raise Exception(_("Server returned error: %s" % res.read())) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 975598324..d29d1e06b 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -86,7 +86,7 @@ class QuantumManager(manager.FlatManager): if num_networks != 1: raise Exception(_("QuantumManager requires that only one" " network is created per call")) - q_tenant_id = kwargs.get("project_id", FLAGS.quantum_default_tenant_id) + q_tenant_id = kwargs["project_id"] or FLAGS.quantum_default_tenant_id quantum_net_id = uuid if quantum_net_id: if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): @@ -179,7 +179,7 @@ class QuantumManager(manager.FlatManager): vif_rec) return self.get_instance_nw_info(context, instance_id, - instance_type_id, host) + instance_type_id, host) def get_instance_nw_info(self, context, instance_id, instance_type_id, host): -- cgit From 13544ec52d4f84032b7925b1dab00fbdc5ca0c79 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Wed, 24 Aug 2011 03:07:11 -0700 Subject: pep8 fixes --- nova/network/quantum/client.py | 4 ++-- nova/network/quantum/nova_ipam_lib.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index b457190c1..82f23b6b5 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -76,8 +76,8 @@ class Client(object): attachment_path = "/networks/%s/ports/%s/attachment" def __init__(self, host="127.0.0.1", port=9696, use_ssl=False, tenant=None, - format="xml", testing_stub=None, key_file=None, cert_file=None, - logger=None): + format="xml", testing_stub=None, key_file=None, + cert_file=None, logger=None): """ Creates a new client to some service. diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index 71e723cb9..4f62887d1 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -81,7 +81,8 @@ class QuantumNovaIPAMLib(object): admin_context = context.elevated() network = db.network_get_by_cidr(admin_context, cidr) if not network: - raise Exception(_("No network with fixed_range = %s" % fixed_range)) + raise Exception(_("No network with fixed_range = %s" % + fixed_range)) return network['uuid'] def delete_subnets_by_net_id(self, context, net_id, project_id): -- cgit From 551b4b1b16c894551e5337663374a40aa46663d7 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Wed, 24 Aug 2011 03:14:39 -0700 Subject: Catch exception for instances that aren't there If an instance failed to spawn it will be in a shutdown state but the instance won't actually be there when we attempt to deallocate for the instance. For now we catch the exception and just log a message. Also made some minor formatting changes. 
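The pattern being applied here is best-effort cleanup: InstanceNotFound is treated as a non-fatal condition and logged rather than allowed to abort deallocation. A rough standalone sketch of that shape follows (names are stubbed for illustration only; the actual hunk below wraps db.virtual_interface_delete_by_instance in nova/network/quantum/manager.py):

    import logging

    LOG = logging.getLogger(__name__)


    class InstanceNotFound(Exception):
        """Stand-in for nova.exception.InstanceNotFound (illustration only)."""


    def delete_vifs_best_effort(db_api, context, instance_id):
        # The instance row may already be gone if the spawn failed, so log
        # the condition instead of letting the exception abort deallocation.
        try:
            db_api.virtual_interface_delete_by_instance(context, instance_id)
        except InstanceNotFound:
            LOG.error("Attempted to deallocate non-existent instance: %s",
                      instance_id)
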
--- nova/network/quantum/manager.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index d29d1e06b..c03622218 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -287,17 +287,21 @@ class QuantumManager(manager.FlatManager): (net_id, port_id) = self.q_conn.get_port_by_attachment( q_tenant_id, interface_id) if not net_id: - LOG.error("Unable to find port with attachment: %s" % \ - (interface_id)) + LOG.error("Unable to find port with attachment: %s" % + (interface_id)) continue self.q_conn.detach_and_delete_port(q_tenant_id, - net_id, port_id) + net_id, port_id) self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id, - net_id, vif_ref) + net_id, vif_ref) - db.virtual_interface_delete_by_instance(admin_context, - instance_id) + try: + db.virtual_interface_delete_by_instance(admin_context, + instance_id) + except exception.InstanceNotFound: + LOG.error(_("Attempted to deallocate non-existent instance: %s" % + (instance_id))) self._do_trigger_security_group_members_refresh_for_instance( instance_id) -- cgit From c173b6bef17d8efe64b43664cc39d81a1c31f3b8 Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Wed, 24 Aug 2011 15:53:23 +0100 Subject: Do not require --bridge_interface for FlatDHCPManager Unlike VlanManager, FlatDHCPManager actually works fine without a bridge interface on single host deployments. --- bin/nova-manage | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 2e0bd0ecb..bd42c24df 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -714,8 +714,7 @@ class NetworkCommands(object): bridge_interface = bridge_interface or FLAGS.flat_interface or \ FLAGS.vlan_interface if not bridge_interface: - interface_required = ['nova.network.manager.FlatDHCPManager', - 'nova.network.manager.VlanManager'] + interface_required = ['nova.network.manager.VlanManager'] if FLAGS.network_manager in interface_required: raise exception.NetworkNotCreated(req='--bridge_interface') -- cgit From 026efcd174cdb1b1d0fece9611dbae358de48387 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 24 Aug 2011 14:08:25 -0400 Subject: Updated FlavorsXMLSerialization tests to use etree and validation instead of minidom --- nova/api/openstack/schemas/v1.1/flavor.rng | 10 ++ nova/api/openstack/schemas/v1.1/flavors.rng | 6 + nova/api/openstack/schemas/v1.1/flavors_index.rng | 12 ++ nova/tests/api/openstack/test_flavors.py | 180 ++++++++++------------ 4 files changed, 107 insertions(+), 101 deletions(-) create mode 100644 nova/api/openstack/schemas/v1.1/flavor.rng create mode 100644 nova/api/openstack/schemas/v1.1/flavors.rng create mode 100644 nova/api/openstack/schemas/v1.1/flavors_index.rng diff --git a/nova/api/openstack/schemas/v1.1/flavor.rng b/nova/api/openstack/schemas/v1.1/flavor.rng new file mode 100644 index 000000000..a00e4e9ee --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/flavor.rng @@ -0,0 +1,10 @@ + + + + + + + + + diff --git a/nova/api/openstack/schemas/v1.1/flavors.rng b/nova/api/openstack/schemas/v1.1/flavors.rng new file mode 100644 index 000000000..b7a3acc01 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/flavors.rng @@ -0,0 +1,6 @@ + + + + + diff --git a/nova/api/openstack/schemas/v1.1/flavors_index.rng b/nova/api/openstack/schemas/v1.1/flavors_index.rng new file mode 100644 index 000000000..d1a4fedb1 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/flavors_index.rng @@ -0,0 +1,12 @@ + + + + + + + + 
+ + + diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 812bece42..ea5038b39 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -17,16 +17,21 @@ import json import webob -import xml.dom.minidom as minidom +from lxml import etree from nova.api.openstack import flavors import nova.db.api from nova import exception from nova import test +from nova.api.openstack import xmlutil from nova.tests.api.openstack import fakes from nova import wsgi +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" + + def stub_flavor(flavorid, name, memory_mb="256", local_gb="10"): return { "flavorid": str(flavorid), @@ -265,7 +270,7 @@ class FlavorsXMLSerializationTest(test.TestCase): def test_show(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavor": { "id": "12", "name": "asdf", @@ -284,29 +289,25 @@ class FlavorsXMLSerializationTest(test.TestCase): }, } - output = serializer.serialize(input, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'show') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavor') + flavor_dict = fixture['flavor'] + + for key in ['name', 'id', 'ram', 'disk']: + self.assertEqual(root.get(key), str(flavor_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_handles_integers(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavor": { "id": 12, "name": "asdf", @@ -325,29 +326,25 @@ class FlavorsXMLSerializationTest(test.TestCase): }, } - output = serializer.serialize(input, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'show') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavor') + flavor_dict = fixture['flavor'] + + for key in ['name', 'id', 'ram', 'disk']: + self.assertEqual(root.get(key), str(flavor_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_detail(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavors": [ { "id": "23", @@ -383,39 +380,28 @@ class FlavorsXMLSerializationTest(test.TestCase): ], } - output = serializer.serialize(input, 'detail') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - - - - - - - """.replace(" ", "") % locals()) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'detail') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavors') + flavor_elems = root.findall('{0}flavor'.format(NS)) + self.assertEqual(len(flavor_elems), 2) + for i, flavor_elem in enumerate(flavor_elems): + flavor_dict = fixture['flavors'][i] + + for key in ['name', 'id', 'ram', 'disk']: + 
self.assertEqual(flavor_elem.get(key), str(flavor_dict[key])) + + link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_index(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavors": [ { "id": "23", @@ -451,42 +437,34 @@ class FlavorsXMLSerializationTest(test.TestCase): ], } - output = serializer.serialize(input, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - - - - - - - """.replace(" ", "") % locals()) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'index') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavors_index') + flavor_elems = root.findall('{0}flavor'.format(NS)) + self.assertEqual(len(flavor_elems), 2) + for i, flavor_elem in enumerate(flavor_elems): + flavor_dict = fixture['flavors'][i] + + for key in ['name', 'id']: + self.assertEqual(flavor_elem.get(key), str(flavor_dict[key])) + + link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(flavor_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_index_empty(self): serializer = flavors.FlavorXMLSerializer() - input = { + fixture = { "flavors": [], } - output = serializer.serialize(input, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - """.replace(" ", "") % locals()) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = serializer.serialize(fixture, 'index') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'flavors_index') + flavor_elems = root.findall('{0}flavor'.format(NS)) + self.assertEqual(len(flavor_elems), 0) -- cgit From 3d17d22926e2f589648fdac26302a58e8c4e9069 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 24 Aug 2011 11:36:41 -0700 Subject: fix more tests --- nova/tests/api/openstack/contrib/test_createserverext.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..01c7587e3 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -89,6 +89,9 @@ class CreateserverextTest(test.TestCase): self.networks = None return [{'id': '1234', 'display_name': 'fakeinstance', 'uuid': FAKE_UUID, + 'user_id': 'fake', + 'project_id': 'fake', + 'display_description': 'fakedescription', 'created_at': "", 'updated_at': ""}] -- cgit From ba0bc2830e3a67617a0199a2a5079f5dfd3b22af Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 24 Aug 2011 15:07:57 -0400 Subject: Updated flavors xml serialization to use lxml instead of minidom --- nova/api/openstack/flavors.py | 70 ++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index fd36060da..f689aa7ab 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -16,12 +16,13 @@ # under the License. 
import webob -import xml.dom.minidom as minidom +from lxml import etree from nova import db from nova import exception from nova.api.openstack import views from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil class Controller(object): @@ -78,48 +79,49 @@ class ControllerV11(Controller): class FlavorXMLSerializer(wsgi.XMLDictSerializer): + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def __init__(self): super(FlavorXMLSerializer, self).__init__(xmlns=wsgi.XMLNS_V11) - def _flavor_to_xml(self, xml_doc, flavor, detailed): - flavor_node = xml_doc.createElement('flavor') - flavor_node.setAttribute('id', str(flavor['id'])) - flavor_node.setAttribute('name', flavor['name']) + def _populate_flavor(self, flavor_elem, flavor_dict, detailed=False): + """Populate a flavor xml element from a dict.""" + flavor_elem.set('name', flavor_dict['name']) + flavor_elem.set('id', str(flavor_dict['id'])) if detailed: - flavor_node.setAttribute('ram', str(flavor['ram'])) - flavor_node.setAttribute('disk', str(flavor['disk'])) - - link_nodes = self._create_link_nodes(xml_doc, flavor['links']) - for link_node in link_nodes: - flavor_node.appendChild(link_node) - return flavor_node + flavor_elem.set('ram', str(flavor_dict['ram'])) + flavor_elem.set('disk', str(flavor_dict['disk'])) + for link in flavor_dict.get('links', []): + elem = etree.SubElement(flavor_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return flavor_elem - def _flavors_list_to_xml(self, xml_doc, flavors, detailed): - container_node = xml_doc.createElement('flavors') + def _to_xml(self, root): + """Convert the xml object to an xml string.""" - for flavor in flavors: - item_node = self._flavor_to_xml(xml_doc, flavor, detailed) - container_node.appendChild(item_node) - return container_node + return etree.tostring(root, encoding='UTF-8') def show(self, flavor_container): - xml_doc = minidom.Document() - flavor = flavor_container['flavor'] - node = self._flavor_to_xml(xml_doc, flavor, True) - return self.to_xml_string(node, True) - - def detail(self, flavors_container): - xml_doc = minidom.Document() - flavors = flavors_container['flavors'] - node = self._flavors_list_to_xml(xml_doc, flavors, True) - return self.to_xml_string(node, True) - - def index(self, flavors_container): - xml_doc = minidom.Document() - flavors = flavors_container['flavors'] - node = self._flavors_list_to_xml(xml_doc, flavors, False) - return self.to_xml_string(node, True) + flavor = etree.Element('flavor', nsmap=self.NSMAP) + self._populate_flavor(flavor, flavor_container['flavor'], True) + return self._to_xml(flavor) + + def detail(self, flavors_dict): + flavors = etree.Element('flavors', nsmap=self.NSMAP) + for flavor_dict in flavors_dict['flavors']: + flavor = etree.SubElement(flavors, 'flavor') + self._populate_flavor(flavor, flavor_dict, True) + return self._to_xml(flavors) + + def index(self, flavors_dict): + flavors = etree.Element('flavors', nsmap=self.NSMAP) + for flavor_dict in flavors_dict['flavors']: + flavor = etree.SubElement(flavors, 'flavor') + self._populate_flavor(flavor, flavor_dict, False) + return self._to_xml(flavors) def create_resource(version='1.0'): -- cgit From 0571c86d18c242f46e44e380b257cfc40598d31b Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 24 Aug 2011 12:17:58 -0700 Subject: use dict.get for user_id, project_id, and display_description in servers view as suggested by ed leaf, so that not all tests require these fields --- 
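The point of switching to dict.get() is that a stubbed instance record in the tests may simply omit these keys; direct indexing would raise KeyError, while .get() falls back to an empty string. A minimal sketch of the difference (plain dicts, not the actual ViewBuilder):

    # A stubbed instance record that omits the optional fields.
    inst = {'id': 1, 'display_name': 'server1'}

    # inst['user_id'] would raise KeyError here; .get() falls back to ''.
    view = {
        'id': inst['id'],
        'name': inst['display_name'],
        'user_id': inst.get('user_id', ''),
        'tenant_id': inst.get('project_id', ''),
        'description': inst.get('display_description', ''),
    }

    assert view['user_id'] == '' and view['tenant_id'] == ''
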
nova/api/openstack/views/servers.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index fd7c040d4..6fd09aae6 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -65,9 +65,9 @@ class ViewBuilder(object): inst_dict = { 'id': inst['id'], 'name': inst['display_name'], - 'user_id': inst['user_id'], - 'tenant_id': inst['project_id'], - 'description': inst['display_description'], + 'user_id': inst.get('user_id', ''), + 'tenant_id': inst.get('project_id', ''), + 'description': inst.get('display_description', ''), 'status': common.status_from_power_state(inst.get('state'))} ctxt = nova.context.get_admin_context() -- cgit From 08981ee7228aa0e4b68ec8e9016ef68b987a3ac3 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Wed, 24 Aug 2011 16:41:26 -0400 Subject: Updated ImageXMLSerialization tests to use etree instead of minidom Fixed incorrect server entity ids in tests --- nova/api/openstack/images.py | 5 +- nova/tests/api/openstack/test_images.py | 347 +++++++++++++++----------------- 2 files changed, 164 insertions(+), 188 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 1c8fc10c9..59184d81a 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -230,9 +230,8 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer): image_node.appendChild(server_node) metadata = image.get('metadata', {}).items() - if len(metadata) > 0: - metadata_node = self._create_metadata_node(xml_doc, metadata) - image_node.appendChild(metadata_node) + metadata_node = self._create_metadata_node(xml_doc, metadata) + image_node.appendChild(metadata_node) link_nodes = self._create_link_nodes(xml_doc, image['links']) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2a7cfc382..145cf527c 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -23,6 +23,7 @@ and as a WSGI layer import copy import json import os +from lxml import etree import shutil import tempfile import xml.dom.minidom as minidom @@ -38,9 +39,14 @@ from nova import test from nova import utils import nova.api.openstack from nova.api.openstack import images +from nova.api.openstack import xmlutil from nova.tests.api.openstack import fakes +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" + + class _BaseImageServiceTests(test.TestCase): """Tasks to test for all image services""" @@ -1144,7 +1150,7 @@ class ImageXMLSerializationTest(test.TestCase): 'status': 'ACTIVE', 'progress': 80, 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1173,37 +1179,35 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - - - - - - - - value1 - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status', 'progress']: + self.assertEqual(root.get(key), 
str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = image_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root.get('id'), image_dict['server']['id']) + link_nodes = server_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['server']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_zero_metadata(self): serializer = images.ImageXMLSerializer() @@ -1216,7 +1220,7 @@ class ImageXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, 'status': 'ACTIVE', 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1243,31 +1247,31 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - - - - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(root.get(key), str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + meta_nodes = root.findall('{0}meta'.format(ATOMNS)) + self.assertEqual(len(meta_nodes), 0) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root.get('id'), image_dict['server']['id']) + link_nodes = server_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['server']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_image_no_metadata_key(self): serializer = images.ImageXMLSerializer() @@ -1280,7 +1284,7 @@ class ImageXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, 'status': 'ACTIVE', 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1306,31 +1310,31 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - - - - - - - - - """.replace(" ", "") % (locals())) 
- - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(root.get(key), str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + meta_nodes = root.findall('{0}meta'.format(ATOMNS)) + self.assertEqual(len(meta_nodes), 0) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root.get('id'), image_dict['server']['id']) + link_nodes = server_root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['server']['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_show_no_server(self): serializer = images.ImageXMLSerializer() @@ -1359,30 +1363,30 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - - - - value1 - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'image') + image_dict = fixture['image'] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(root.get(key), str(image_dict[key])) + + link_nodes = root.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = root.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = image_dict['metadata'].items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) + + server_root = root.find('{0}server'.format(NS)) + self.assertEqual(server_root, None) def test_index(self): serializer = images.ImageXMLSerializer() @@ -1397,6 +1401,10 @@ class ImageXMLSerializationTest(test.TestCase): 'href': self.IMAGE_HREF % 1, 'rel': 'self', }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, ], }, { @@ -1407,35 +1415,32 @@ class ImageXMLSerializationTest(test.TestCase): 'href': self.IMAGE_HREF % 2, 'rel': 'self', }, + { + 'href': self.IMAGE_BOOKMARK % 2, + 'rel': 'bookmark', + }, ], }, ] } output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_href_two = self.IMAGE_HREF % 2 - expected_bookmark_two = self.IMAGE_BOOKMARK % 2 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - - - - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) 
+ xmlutil.validate_schema(root, 'images_index') + image_elems = root.findall('{0}image'.format(NS)) + self.assertEqual(len(image_elems), 2) + for i, image_elem in enumerate(image_elems): + image_dict = fixture['images'][i] + + for key in ['name', 'id']: + self.assertEqual(image_elem.get(key), str(image_dict[key])) + + link_nodes = image_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) def test_index_zero_images(self): serializer = images.ImageXMLSerializer() @@ -1445,15 +1450,11 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixtures, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'images_index') + image_elems = root.findall('{0}image'.format(NS)) + self.assertEqual(len(image_elems), 0) def test_detail(self): serializer = images.ImageXMLSerializer() @@ -1467,7 +1468,7 @@ class ImageXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, 'status': 'ACTIVE', 'server': { - 'id': 1, + 'id': '1', 'links': [ { 'href': self.SERVER_HREF, @@ -1491,7 +1492,7 @@ class ImageXMLSerializationTest(test.TestCase): ], }, { - 'id': 2, + 'id': '2', 'name': 'Image2', 'created': self.TIMESTAMP, 'updated': self.TIMESTAMP, @@ -1515,46 +1516,22 @@ class ImageXMLSerializationTest(test.TestCase): } output = serializer.serialize(fixture, 'detail') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_href_two = self.IMAGE_HREF % 2 - expected_bookmark_two = self.IMAGE_BOOKMARK % 2 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - - - - - - - - - - - - - value1 - - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'images') + image_elems = root.findall('{0}image'.format(NS)) + self.assertEqual(len(image_elems), 2) + for i, image_elem in enumerate(image_elems): + image_dict = fixture['images'][i] + + for key in ['name', 'id', 'updated', 'created', 'status']: + self.assertEqual(image_elem.get(key), str(image_dict[key])) + + link_nodes = image_elem.findall('{0}link'.format(ATOMNS)) + self.assertEqual(len(link_nodes), 2) + for i, link in enumerate(image_dict['links']): + for key, value in link.items(): + self.assertEqual(link_nodes[i].get(key), value) + + metadata_root = image_elem.find('{0}metadata'.format(NS)) + metadata_elems = metadata_root.findall('{0}meta'.format(NS)) -- cgit From f1ccdc547d083ffe4c5d03f26f2658d98bc21541 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 11:24:25 -0400 Subject: Updated ImagesXMLSerializer to use etree instead of minidom --- nova/api/openstack/images.py | 145 +++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 80 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 59184d81a..edd26c171 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -16,8 +16,8 @@ import urlparse import os.path +from lxml import etree import webob.exc 
-from xml.dom import minidom from nova import compute from nova import exception @@ -29,6 +29,7 @@ from nova.api.openstack import image_metadata from nova.api.openstack import servers from nova.api.openstack.views import images as images_view from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil LOG = log.getLogger('nova.api.openstack.images') @@ -206,92 +207,76 @@ class ControllerV11(Controller): class ImageXMLSerializer(wsgi.XMLDictSerializer): - xmlns = wsgi.XMLNS_V11 - - def __init__(self): - self.metadata_serializer = common.MetadataXMLSerializer() - - def _image_to_xml(self, xml_doc, image): - image_node = xml_doc.createElement('image') - image_node.setAttribute('id', str(image['id'])) - image_node.setAttribute('name', image['name']) - link_nodes = self._create_link_nodes(xml_doc, - image['links']) - for link_node in link_nodes: - image_node.appendChild(link_node) - return image_node - - def _image_to_xml_detailed(self, xml_doc, image): - image_node = xml_doc.createElement('image') - self._add_image_attributes(image_node, image) - - if 'server' in image: - server_node = self._create_server_node(xml_doc, image['server']) - image_node.appendChild(server_node) - - metadata = image.get('metadata', {}).items() - metadata_node = self._create_metadata_node(xml_doc, metadata) - image_node.appendChild(metadata_node) - - link_nodes = self._create_link_nodes(xml_doc, - image['links']) - for link_node in link_nodes: - image_node.appendChild(link_node) - - return image_node - - def _add_image_attributes(self, node, image): - node.setAttribute('id', str(image['id'])) - node.setAttribute('name', image['name']) - node.setAttribute('created', image['created']) - node.setAttribute('updated', image['updated']) - node.setAttribute('status', image['status']) - if 'progress' in image: - node.setAttribute('progress', str(image['progress'])) - - def _create_metadata_node(self, xml_doc, metadata): - return self.metadata_serializer.meta_list_to_xml(xml_doc, metadata) - - def _create_server_node(self, xml_doc, server): - server_node = xml_doc.createElement('server') - server_node.setAttribute('id', str(server['id'])) - link_nodes = self._create_link_nodes(xml_doc, - server['links']) - for link_node in link_nodes: - server_node.appendChild(link_node) - return server_node - - def _image_list_to_xml(self, xml_doc, images, detailed): - container_node = xml_doc.createElement('images') - if detailed: - image_to_xml = self._image_to_xml_detailed - else: - image_to_xml = self._image_to_xml + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + + def _create_metadata_node(self, metadata_dict): + metadata_elem = etree.Element('metadata', nsmap=self.NSMAP) + for (key, value) in metadata_dict.items(): + elem = etree.SubElement(metadata_elem, 'meta') + elem.set('key', key) + elem.text = value + + return metadata_elem - for image in images: - item_node = image_to_xml(xml_doc, image) - container_node.appendChild(item_node) - return container_node + def _create_server_node(self, server_dict): + server_elem = etree.Element('server', nsmap=self.NSMAP) + server_elem.set('id', str(server_dict['id'])) + for link in server_dict.get('links', []): + elem = etree.SubElement(server_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return server_elem + + def _populate_image(self, image_elem, image_dict, detailed=False): + """Populate an image xml element from a dict.""" + + image_elem.set('name', image_dict['name']) + image_elem.set('id', 
str(image_dict['id'])) + if detailed: + image_elem.set('updated', str(image_dict['updated'])) + image_elem.set('created', str(image_dict['created'])) + image_elem.set('status', str(image_dict['status'])) + if 'progress' in image_dict: + image_elem.set('progress', str(image_dict['progress'])) + if 'server' in image_dict: + server_elem = self._create_server_node(image_dict['server']) + image_elem.append(server_elem) + + meta_elem = self._create_metadata_node( + image_dict.get('metadata', {})) + image_elem.append(meta_elem) + + for link in image_dict.get('links', []): + elem = etree.SubElement(image_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return image_elem + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + + return etree.tostring(root, encoding='UTF-8') def index(self, images_dict): - xml_doc = minidom.Document() - node = self._image_list_to_xml(xml_doc, - images_dict['images'], - detailed=False) - return self.to_xml_string(node, True) + images = etree.Element('images', nsmap=self.NSMAP) + for image_dict in images_dict['images']: + image = etree.SubElement(images, 'image') + self._populate_image(image, image_dict, False) + return self._to_xml(images) def detail(self, images_dict): - xml_doc = minidom.Document() - node = self._image_list_to_xml(xml_doc, - images_dict['images'], - detailed=True) - return self.to_xml_string(node, True) + images = etree.Element('images', nsmap=self.NSMAP) + for image_dict in images_dict['images']: + image = etree.SubElement(images, 'image') + self._populate_image(image, image_dict, True) + return self._to_xml(images) def show(self, image_dict): - xml_doc = minidom.Document() - node = self._image_to_xml_detailed(xml_doc, - image_dict['image']) - return self.to_xml_string(node, True) + image = etree.Element('image', nsmap=self.NSMAP) + self._populate_image(image, image_dict['image'], True) + return self._to_xml(image) def create_resource(version='1.0'): -- cgit From 48b2de98d002a3f7dac03d29696e4c37ed4f983f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 12:05:53 -0400 Subject: Added schemas Updated metadata tests to use etree instead of minidom --- nova/api/openstack/schemas/v1.1/image.rng | 30 ++++++ nova/api/openstack/schemas/v1.1/images.rng | 6 ++ nova/api/openstack/schemas/v1.1/images_index.rng | 12 +++ nova/api/openstack/schemas/v1.1/metadata.rng | 9 ++ nova/tests/api/openstack/test_common.py | 122 +++++++++++++---------- 5 files changed, 125 insertions(+), 54 deletions(-) create mode 100644 nova/api/openstack/schemas/v1.1/image.rng create mode 100644 nova/api/openstack/schemas/v1.1/images.rng create mode 100644 nova/api/openstack/schemas/v1.1/images_index.rng create mode 100644 nova/api/openstack/schemas/v1.1/metadata.rng diff --git a/nova/api/openstack/schemas/v1.1/image.rng b/nova/api/openstack/schemas/v1.1/image.rng new file mode 100644 index 000000000..887f76751 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/image.rng @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/nova/api/openstack/schemas/v1.1/images.rng b/nova/api/openstack/schemas/v1.1/images.rng new file mode 100644 index 000000000..064d4d9cc --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/images.rng @@ -0,0 +1,6 @@ + + + + + diff --git a/nova/api/openstack/schemas/v1.1/images_index.rng b/nova/api/openstack/schemas/v1.1/images_index.rng new file mode 100644 index 000000000..81af19cb5 --- /dev/null +++ 
b/nova/api/openstack/schemas/v1.1/images_index.rng @@ -0,0 +1,12 @@ + + + + + + + + + + + diff --git a/nova/api/openstack/schemas/v1.1/metadata.rng b/nova/api/openstack/schemas/v1.1/metadata.rng new file mode 100644 index 000000000..b2f5d702a --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/metadata.rng @@ -0,0 +1,9 @@ + + + + + + + + diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index b422bc4d1..2e5bf08fa 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -19,6 +19,7 @@ Test suites for 'common' code used throughout the OpenStack HTTP API. """ +from lxml import etree import webob.exc import xml.dom.minidom as minidom @@ -26,6 +27,11 @@ from webob import Request from nova import test from nova.api.openstack import common +from nova.api.openstack import xmlutil + + +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" class LimiterTest(test.TestCase): @@ -323,16 +329,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - four - two - - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) def test_index_null(self): serializer = common.MetadataXMLSerializer() @@ -342,15 +348,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - None - - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) def test_index_unicode(self): serializer = common.MetadataXMLSerializer() @@ -360,15 +367,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(u""" - - Jos\xe9 - - """.encode("UTF-8").replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 1) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(metadata_elem.text.strip(), meta_value) def test_show(self): 
serializer = common.MetadataXMLSerializer() @@ -378,14 +386,12 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - two - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + meta_dict = fixture['meta'] + (meta_key, meta_value) = meta_dict.items()[0] + self.assertEqual(str(root.get('key')), str(meta_key)) + self.assertEqual(root.text.strip(), meta_value) def test_update_all(self): serializer = common.MetadataXMLSerializer() @@ -396,16 +402,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'update_all') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - value6 - value4 - - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 2) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) def test_update_item(self): serializer = common.MetadataXMLSerializer() @@ -415,14 +421,12 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'update') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - two - """.replace(" ", "").replace("\n", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + meta_dict = fixture['meta'] + (meta_key, meta_value) = meta_dict.items()[0] + self.assertEqual(str(root.get('key')), str(meta_key)) + self.assertEqual(root.text.strip(), meta_value) def test_create(self): serializer = common.MetadataXMLSerializer() @@ -434,6 +438,16 @@ class MetadataXMLSerializationTest(test.TestCase): }, } output = serializer.serialize(fixture, 'create') + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'metadata') + metadata_dict = fixture['metadata'] + metadata_elems = root.findall('{0}meta'.format(NS)) + self.assertEqual(len(metadata_elems), 3) + for i, metadata_elem in enumerate(metadata_elems): + (meta_key, meta_value) = metadata_dict.items()[i] + self.assertEqual(str(metadata_elem.get('key')), str(meta_key)) + self.assertEqual(str(metadata_elem.text).strip(), str(meta_value)) actual = minidom.parseString(output.replace(" ", "")) expected = minidom.parseString(""" -- cgit From e0b64c9aa0d2617d1d9e4dc0c35dc3899e0e527a Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 12:44:01 -0400 Subject: Updated MetadataXMLSerializer to use etree instead of minidom --- nova/api/openstack/common.py | 71 ++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index d9eb832f2..d9371b89a 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -16,6 +16,7 @@ # under the License. 
import functools +from lxml import etree import re import urlparse from xml.dom import minidom @@ -27,6 +28,7 @@ from nova import flags from nova import log as logging from nova import quota from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil from nova.compute import power_state as compute_power_state @@ -282,54 +284,53 @@ class MetadataHeadersSerializer(wsgi.ResponseHeadersSerializer): class MetadataXMLSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_V11} def __init__(self, xmlns=wsgi.XMLNS_V11): super(MetadataXMLSerializer, self).__init__(xmlns=xmlns) - def _meta_item_to_xml(self, doc, key, value): - node = doc.createElement('meta') - doc.appendChild(node) - node.setAttribute('key', '%s' % key) - text = doc.createTextNode('%s' % value) - node.appendChild(text) - return node - - def meta_list_to_xml(self, xml_doc, meta_items): - container_node = xml_doc.createElement('metadata') - for (key, value) in meta_items: - item_node = self._meta_item_to_xml(xml_doc, key, value) - container_node.appendChild(item_node) - return container_node - - def _meta_list_to_xml_string(self, metadata_dict): - xml_doc = minidom.Document() - items = metadata_dict['metadata'].items() - container_node = self.meta_list_to_xml(xml_doc, items) - xml_doc.appendChild(container_node) - self._add_xmlns(container_node) - return xml_doc.toxml('UTF-8') + def _populate_metadata(self, metadata_elem, meta_dict): + for (key, value) in meta_dict.items(): + elem = etree.SubElement(metadata_elem, 'meta') + elem.set('key', str(key)) + elem.text = value + + def _populate_meta_item(self, meta_elem, meta_item_dict): + """Populate a meta xml element from a dict.""" + (key, value) = meta_item_dict.items()[0] + meta_elem.set('key', str(key)) + meta_elem.text = value + return meta_elem + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + + return etree.tostring(root, encoding='UTF-8') def index(self, metadata_dict): - return self._meta_list_to_xml_string(metadata_dict) + metadata = etree.Element('metadata', nsmap=self.NSMAP) + self._populate_metadata(metadata, metadata_dict.get('metadata', {})) + return self._to_xml(metadata) def create(self, metadata_dict): - return self._meta_list_to_xml_string(metadata_dict) + metadata = etree.Element('metadata', nsmap=self.NSMAP) + self._populate_metadata(metadata, metadata_dict.get('metadata', {})) + return self._to_xml(metadata) def update_all(self, metadata_dict): - return self._meta_list_to_xml_string(metadata_dict) - - def _meta_item_to_xml_string(self, meta_item_dict): - xml_doc = minidom.Document() - item_key, item_value = meta_item_dict.items()[0] - item_node = self._meta_item_to_xml(xml_doc, item_key, item_value) - xml_doc.appendChild(item_node) - self._add_xmlns(item_node) - return xml_doc.toxml('UTF-8') + metadata = etree.Element('metadata', nsmap=self.NSMAP) + self._populate_metadata(metadata, metadata_dict.get('metadata', {})) + return self._to_xml(metadata) def show(self, meta_item_dict): - return self._meta_item_to_xml_string(meta_item_dict['meta']) + meta = etree.Element('meta', nsmap=self.NSMAP) + self._populate_meta_item(meta, meta_item_dict['meta']) + return self._to_xml(meta) def update(self, meta_item_dict): - return self._meta_item_to_xml_string(meta_item_dict['meta']) + meta = etree.Element('meta', nsmap=self.NSMAP) + self._populate_meta_item(meta, meta_item_dict['meta']) + return self._to_xml(meta) def default(self, *args, **kwargs): return '' -- cgit From 73704209a146bf51f51f445dc1ccc4410181ad6c Mon Sep 17 
00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 14:58:31 -0400 Subject: Updated ServerXMLSerializer to use etree instead of minidom --- nova/api/openstack/schemas/v1.1/server.rng | 6 +- nova/api/openstack/servers.py | 215 ++++++++++++++--------------- 2 files changed, 105 insertions(+), 116 deletions(-) diff --git a/nova/api/openstack/schemas/v1.1/server.rng b/nova/api/openstack/schemas/v1.1/server.rng index dbd169a83..68f86373c 100644 --- a/nova/api/openstack/schemas/v1.1/server.rng +++ b/nova/api/openstack/schemas/v1.1/server.rng @@ -15,9 +15,6 @@ - - - @@ -47,4 +44,7 @@ + + + diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 27c67e79e..b978dea08 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -17,8 +17,8 @@ import base64 import os import traceback +from lxml import etree from webob import exc -from xml.dom import minidom import webob from nova import compute @@ -37,6 +37,7 @@ import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images import nova.api.openstack.views.servers +from nova.api.openstack import xmlutil LOG = logging.getLogger('nova.api.openstack.servers') @@ -835,123 +836,112 @@ class HeadersSerializer(wsgi.ResponseHeadersSerializer): class ServerXMLSerializer(wsgi.XMLDictSerializer): - xmlns = wsgi.XMLNS_V11 - - def __init__(self): - self.metadata_serializer = common.MetadataXMLSerializer() - self.addresses_serializer = ips.IPXMLSerializer() - - def _create_basic_entity_node(self, xml_doc, id, links, name): - basic_node = xml_doc.createElement(name) - basic_node.setAttribute('id', str(id)) - link_nodes = self._create_link_nodes(xml_doc, links) - for link_node in link_nodes: - basic_node.appendChild(link_node) - return basic_node - - def _create_metadata_node(self, xml_doc, metadata): - return self.metadata_serializer.meta_list_to_xml(xml_doc, metadata) - - def _create_addresses_node(self, xml_doc, addresses): - return self.addresses_serializer.networks_to_xml(xml_doc, addresses) - - def _add_server_attributes(self, node, server): - node.setAttribute('id', str(server['id'])) - node.setAttribute('uuid', str(server['uuid'])) - node.setAttribute('hostId', str(server['hostId'])) - node.setAttribute('name', server['name']) - node.setAttribute('created', str(server['created'])) - node.setAttribute('updated', str(server['updated'])) - node.setAttribute('status', server['status']) - if 'accessIPv4' in server: - node.setAttribute('accessIPv4', str(server['accessIPv4'])) - if 'accessIPv6' in server: - node.setAttribute('accessIPv6', str(server['accessIPv6'])) - if 'progress' in server: - node.setAttribute('progress', str(server['progress'])) - - def _server_to_xml(self, xml_doc, server): - server_node = xml_doc.createElement('server') - server_node.setAttribute('id', str(server['id'])) - server_node.setAttribute('name', server['name']) - link_nodes = self._create_link_nodes(xml_doc, - server['links']) - for link_node in link_nodes: - server_node.appendChild(link_node) - return server_node - - def _server_to_xml_detailed(self, xml_doc, server): - server_node = xml_doc.createElement('server') - self._add_server_attributes(server_node, server) - - link_nodes = self._create_link_nodes(xml_doc, - server['links']) - for link_node in link_nodes: - server_node.appendChild(link_node) - - if 'image' in server: - image_node = self._create_basic_entity_node(xml_doc, - server['image']['id'], - server['image']['links'], - 'image') - server_node.appendChild(image_node) - 
- if 'flavor' in server: - flavor_node = self._create_basic_entity_node(xml_doc, - server['flavor']['id'], - server['flavor']['links'], - 'flavor') - server_node.appendChild(flavor_node) - - metadata = server.get('metadata', {}).items() - if len(metadata) > 0: - metadata_node = self._create_metadata_node(xml_doc, metadata) - server_node.appendChild(metadata_node) - - addresses_node = self._create_addresses_node(xml_doc, - server['addresses']) - server_node.appendChild(addresses_node) - - return server_node - - def _server_list_to_xml(self, xml_doc, servers, detailed): - container_node = xml_doc.createElement('servers') + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + + def _create_metadata_node(self, metadata_dict): + metadata_elem = etree.Element('metadata', nsmap=self.NSMAP) + for (key, value) in metadata_dict.items(): + elem = etree.SubElement(metadata_elem, 'meta') + elem.set('key', key) + elem.text = value + + return metadata_elem + + def _create_image_node(self, image_dict): + image_elem = etree.Element('image', nsmap=self.NSMAP) + image_elem.set('id', str(image_dict['id'])) + for link in image_dict.get('links', []): + elem = etree.SubElement(image_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return image_elem + + def _create_flavor_node(self, flavor_dict): + flavor_elem = etree.Element('flavor', nsmap=self.NSMAP) + flavor_elem.set('id', str(flavor_dict['id'])) + for link in flavor_dict.get('links', []): + elem = etree.SubElement(flavor_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return flavor_elem + + def _create_addresses_node(self, addresses_dict): + addresses_elem = etree.Element('addresses', nsmap=self.NSMAP) + for (network_id, ip_dicts) in addresses_dict.items(): + network_node = etree.SubElement(addresses_elem, 'network') + network_node.set('id', str(network_id)) + for ip_dict in ip_dicts: + ip_elem = etree.SubElement(network_node, 'ip') + ip_elem.set('version', str(ip_dict['version'])) + ip_elem.set('addr', ip_dict['addr']) + return addresses_elem + + def _populate_server(self, server_elem, server_dict, detailed=False): + """Populate a server xml element from a dict.""" + + server_elem.set('name', server_dict['name']) + server_elem.set('id', str(server_dict['id'])) if detailed: - server_to_xml = self._server_to_xml_detailed - else: - server_to_xml = self._server_to_xml - - for server in servers: - item_node = server_to_xml(xml_doc, server) - container_node.appendChild(item_node) - return container_node + server_elem.set('uuid', str(server_dict['uuid'])) + server_elem.set('updated', str(server_dict['updated'])) + server_elem.set('created', str(server_dict['created'])) + server_elem.set('hostId', str(server_dict['hostId'])) + server_elem.set('accessIPv4', str(server_dict['accessIPv4'])) + server_elem.set('accessIPv6', str(server_dict['accessIPv6'])) + server_elem.set('status', str(server_dict['status'])) + if 'progress' in server_dict: + server_elem.set('progress', str(server_dict['progress'])) + image_elem = self._create_image_node(server_dict['image']) + server_elem.append(image_elem) + + flavor_elem = self._create_flavor_node(server_dict['flavor']) + server_elem.append(flavor_elem) + + meta_elem = self._create_metadata_node( + server_dict.get('metadata', {})) + server_elem.append(meta_elem) + + addresses_elem = self._create_addresses_node( + server_dict.get('addresses', {})) + server_elem.append(addresses_elem) + + for link in 
server_dict.get('links', []): + elem = etree.SubElement(server_elem, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + return server_elem + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8') def index(self, servers_dict): - xml_doc = minidom.Document() - node = self._server_list_to_xml(xml_doc, - servers_dict['servers'], - detailed=False) - return self.to_xml_string(node, True) + servers = etree.Element('servers', nsmap=self.NSMAP) + for server_dict in servers_dict['servers']: + server = etree.SubElement(servers, 'server') + self._populate_server(server, server_dict, False) + return self._to_xml(servers) def detail(self, servers_dict): - xml_doc = minidom.Document() - node = self._server_list_to_xml(xml_doc, - servers_dict['servers'], - detailed=True) - return self.to_xml_string(node, True) + servers = etree.Element('servers', nsmap=self.NSMAP) + for server_dict in servers_dict['servers']: + server = etree.SubElement(servers, 'server') + self._populate_server(server, server_dict, True) + return self._to_xml(servers) def show(self, server_dict): - xml_doc = minidom.Document() - node = self._server_to_xml_detailed(xml_doc, - server_dict['server']) - return self.to_xml_string(node, True) + server = etree.Element('server', nsmap=self.NSMAP) + self._populate_server(server, server_dict['server'], True) + return self._to_xml(server) def create(self, server_dict): - xml_doc = minidom.Document() - node = self._server_to_xml_detailed(xml_doc, - server_dict['server']) - node.setAttribute('adminPass', server_dict['server']['adminPass']) - return self.to_xml_string(node, True) + server = etree.Element('server', nsmap=self.NSMAP) + self._populate_server(server, server_dict['server'], True) + server.set('adminPass', server_dict['server']['adminPass']) + return self._to_xml(server) def action(self, server_dict): #NOTE(bcwaldon): We need a way to serialize actions individually. 
This @@ -959,10 +949,9 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): return self.create(server_dict) def update(self, server_dict): - xml_doc = minidom.Document() - node = self._server_to_xml_detailed(xml_doc, - server_dict['server']) - return self.to_xml_string(node, True) + server = etree.Element('server', nsmap=self.NSMAP) + self._populate_server(server, server_dict['server'], True) + return self._to_xml(server) def create_resource(version='1.0'): -- cgit From bed249069e2770d1248ab9ef89751c15be8a13d6 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 15:12:02 -0400 Subject: updated addresses xml serialization tests to use etree instead of minidom --- nova/tests/api/openstack/test_servers.py | 54 ++++++++++++++++---------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3559e6de5..0c816f965 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3171,17 +3171,17 @@ class TestAddressesXMLSerialization(test.TestCase): ], } output = self.serializer.serialize(fixture, 'show') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + network = fixture['network_2'] + self.assertEqual(str(root.get('id')), 'network_2') + ip_elems = root.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) def test_index(self): fixture = { @@ -3197,22 +3197,22 @@ class TestAddressesXMLSerialization(test.TestCase): }, } output = self.serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - - - - - - - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'addresses') + addresses_dict = fixture['addresses'] + network_elems = root.findall('{0}network'.format(NS)) + self.assertEqual(len(network_elems), 2) + for i, network_elem in enumerate(network_elems): + network = addresses_dict.items()[i] + self.assertEqual(str(network_elem.get('id')), str(network[0])) + ip_elems = network_elem.findall('{0}ip'.format(NS)) + for z, ip_elem in enumerate(ip_elems): + ip = network[1][z] + self.assertEqual(str(ip_elem.get('version')), + str(ip['version'])) + self.assertEqual(str(ip_elem.get('addr')), + str(ip['addr'])) class TestServerInstanceCreation(test.TestCase): -- cgit From 8e712b30911956531e346723fbbcaaa49f166ab7 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 15:12:34 -0400 Subject: Added addresses schema --- nova/api/openstack/schemas/v1.1/addresses.rng | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 nova/api/openstack/schemas/v1.1/addresses.rng diff --git a/nova/api/openstack/schemas/v1.1/addresses.rng b/nova/api/openstack/schemas/v1.1/addresses.rng new file mode 100644 index 000000000..b498e8a63 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/addresses.rng @@ -0,0 +1,14 @@ + + + + + + + + + + + + + -- cgit From aafd1ff68f2f6085ddf0d6762ed9ed594d23a321 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 15:30:52 -0400 Subject: updated addresses serializer to use etree instead of 
minidom --- nova/api/openstack/ips.py | 63 +++++++++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index a74fae487..0147d66f8 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -15,14 +15,15 @@ # License for the specific language governing permissions and limitations # under the License. +from lxml import etree import time -from xml.dom import minidom from webob import exc import nova import nova.api.openstack.views.addresses from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil from nova import db @@ -102,42 +103,40 @@ class ControllerV11(Controller): class IPXMLSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def __init__(self, xmlns=wsgi.XMLNS_V11): super(IPXMLSerializer, self).__init__(xmlns=xmlns) - def _ip_to_xml(self, xml_doc, ip_dict): - ip_node = xml_doc.createElement('ip') - ip_node.setAttribute('addr', ip_dict['addr']) - ip_node.setAttribute('version', str(ip_dict['version'])) - return ip_node - - def _network_to_xml(self, xml_doc, network_id, ip_dicts): - network_node = xml_doc.createElement('network') - network_node.setAttribute('id', network_id) + def _create_addresses_node(self, addresses_dict): + addresses_elem = etree.Element('addresses', nsmap=self.NSMAP) + for (network_id, ip_dicts) in addresses_dict.items(): + network_elem = self._create_network_node(network_id, ip_dicts) + addresses_elem.append(network_elem) + return addresses_elem + def _create_network_node(self, network_id, ip_dicts): + network_elem = etree.Element('network', nsmap=self.NSMAP) + network_elem.set('id', str(network_id)) for ip_dict in ip_dicts: - ip_node = self._ip_to_xml(xml_doc, ip_dict) - network_node.appendChild(ip_node) - - return network_node - - def networks_to_xml(self, xml_doc, networks_container): - addresses_node = xml_doc.createElement('addresses') - for (network_id, ip_dicts) in networks_container.items(): - network_node = self._network_to_xml(xml_doc, network_id, ip_dicts) - addresses_node.appendChild(network_node) - return addresses_node - - def show(self, network_container): - (network_id, ip_dicts) = network_container.items()[0] - xml_doc = minidom.Document() - node = self._network_to_xml(xml_doc, network_id, ip_dicts) - return self.to_xml_string(node, False) - - def index(self, addresses_container): - xml_doc = minidom.Document() - node = self.networks_to_xml(xml_doc, addresses_container['addresses']) - return self.to_xml_string(node, False) + ip_elem = etree.SubElement(network_elem, 'ip') + ip_elem.set('version', str(ip_dict['version'])) + ip_elem.set('addr', ip_dict['addr']) + return network_elem + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8') + + def show(self, network_dict): + (network_id, ip_dicts) = network_dict.items()[0] + network = self._create_network_node(network_id, ip_dicts) + return self._to_xml(network) + + def index(self, addresses_dict): + addresses = self._create_addresses_node(addresses_dict['addresses']) + return self._to_xml(addresses) def create_resource(version): -- cgit From b02a5e4f581590c1bf31dae1c9c2bc1e448a6106 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 16:35:04 -0400 Subject: DRYed up code by moving _to_xml into XMLDictSerializer --- nova/api/openstack/common.py | 5 -- nova/api/openstack/flavors.py | 5 -- nova/api/openstack/images.py | 5 -- 
nova/api/openstack/ips.py | 6 +-- nova/api/openstack/servers.py | 4 -- nova/api/openstack/wsgi.py | 5 ++ nova/tests/api/openstack/test_common.py | 14 +++++ nova/tests/api/openstack/test_flavors.py | 27 ++++++++++ nova/tests/api/openstack/test_images.py | 45 ++++++++++++++++ nova/tests/api/openstack/test_servers.py | 89 ++++++++++++++++++++++++++++++++ 10 files changed, 181 insertions(+), 24 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index d9371b89a..7dde68975 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -302,11 +302,6 @@ class MetadataXMLSerializer(wsgi.XMLDictSerializer): meta_elem.text = value return meta_elem - def _to_xml(self, root): - """Convert the xml object to an xml string.""" - - return etree.tostring(root, encoding='UTF-8') - def index(self, metadata_dict): metadata = etree.Element('metadata', nsmap=self.NSMAP) self._populate_metadata(metadata, metadata_dict.get('metadata', {})) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index f689aa7ab..805aad772 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -99,11 +99,6 @@ class FlavorXMLSerializer(wsgi.XMLDictSerializer): elem.set('href', link['href']) return flavor_elem - def _to_xml(self, root): - """Convert the xml object to an xml string.""" - - return etree.tostring(root, encoding='UTF-8') - def show(self, flavor_container): flavor = etree.Element('flavor', nsmap=self.NSMAP) self._populate_flavor(flavor, flavor_container['flavor'], True) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index edd26c171..48c53d6d5 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -254,11 +254,6 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer): elem.set('href', link['href']) return image_elem - def _to_xml(self, root): - """Convert the xml object to an xml string.""" - - return etree.tostring(root, encoding='UTF-8') - def index(self, images_dict): images = etree.Element('images', nsmap=self.NSMAP) for image_dict in images_dict['images']: diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 0147d66f8..d5a715dda 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -104,7 +104,7 @@ class ControllerV11(Controller): class IPXMLSerializer(wsgi.XMLDictSerializer): - NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + NSMAP = {None: xmlutil.XMLNS_V11} def __init__(self, xmlns=wsgi.XMLNS_V11): super(IPXMLSerializer, self).__init__(xmlns=xmlns) @@ -125,10 +125,6 @@ class IPXMLSerializer(wsgi.XMLDictSerializer): ip_elem.set('addr', ip_dict['addr']) return network_elem - def _to_xml(self, root): - """Convert the xml object to an xml string.""" - return etree.tostring(root, encoding='UTF-8') - def show(self, network_dict): (network_id, ip_dicts) = network_dict.items()[0] network = self._create_network_node(network_id, ip_dicts) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index b978dea08..fa5b7c023 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -914,10 +914,6 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): elem.set('href', link['href']) return server_elem - def _to_xml(self, root): - """Convert the xml object to an xml string.""" - return etree.tostring(root, encoding='UTF-8') - def index(self, servers_dict): servers = etree.Element('servers', nsmap=self.NSMAP) for server_dict in servers_dict['servers']: diff --git a/nova/api/openstack/wsgi.py 
b/nova/api/openstack/wsgi.py index 8641e960a..bdcadcb99 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -1,5 +1,6 @@ import json +from lxml import etree import webob from xml.dom import minidom from xml.parsers import expat @@ -392,6 +393,10 @@ class XMLDictSerializer(DictSerializer): link_nodes.append(link_node) return link_nodes + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8', xml_declaration=True) + class ResponseHeadersSerializer(ActionDispatcher): """Default response headers serialization""" diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 2e5bf08fa..867e9d446 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -320,6 +320,20 @@ class MetadataXMLDeserializationTest(test.TestCase): class MetadataXMLSerializationTest(test.TestCase): + def test_xml_declaration(self): + serializer = common.MetadataXMLSerializer() + fixture = { + 'metadata': { + 'one': 'two', + 'three': 'four', + }, + } + + output = serializer.serialize(fixture, 'index') + print output + has_dec = output.startswith("") + self.assertTrue(has_dec) + def test_index(self): serializer = common.MetadataXMLSerializer() fixture = { diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index ea5038b39..a3c5bd107 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -267,6 +267,33 @@ class FlavorsTest(test.TestCase): class FlavorsXMLSerializationTest(test.TestCase): + def test_xml_declaration(self): + serializer = flavors.FlavorXMLSerializer() + + fixture = { + "flavor": { + "id": "12", + "name": "asdf", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/fake/flavors/12", + }, + { + "rel": "bookmark", + "href": "http://localhost/fake/flavors/12", + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + print output + has_dec = output.startswith("") + self.assertTrue(has_dec) + def test_show(self): serializer = flavors.FlavorXMLSerializer() diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 145cf527c..97e940974 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -1138,6 +1138,51 @@ class ImageXMLSerializationTest(test.TestCase): IMAGE_HREF = 'http://localhost/v1.1/fake/images/%s' IMAGE_BOOKMARK = 'http://localhost/fake/images/%s' + def test_xml_declaration(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'status': 'ACTIVE', + 'progress': 80, + 'server': { + 'id': '1', + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + }, + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % 1, + 'rel': 'self', + }, + { + 'href': self.IMAGE_BOOKMARK % 1, + 'rel': 'bookmark', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + print output + has_dec = output.startswith("") + self.assertTrue(has_dec) + def test_show(self): serializer = images.ImageXMLSerializer() diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 0c816f965..d80846747 100644 --- a/nova/tests/api/openstack/test_servers.py +++ 
b/nova/tests/api/openstack/test_servers.py @@ -3163,6 +3163,18 @@ class TestAddressesXMLSerialization(test.TestCase): serializer = nova.api.openstack.ips.IPXMLSerializer() + def test_xml_declaration(self): + fixture = { + 'network_2': [ + {'addr': '192.168.0.1', 'version': 4}, + {'addr': 'fe80::beef', 'version': 6}, + ], + } + output = self.serializer.serialize(fixture, 'show') + print output + has_dec = output.startswith("") + self.assertTrue(has_dec) + def test_show(self): fixture = { 'network_2': [ @@ -3918,6 +3930,83 @@ class ServerXMLSerializationTest(test.TestCase): self.maxDiff = None test.TestCase.setUp(self) + def test_xml_declaration(self): + serializer = servers.ServerXMLSerializer() + + fixture = { + "server": { + "id": 1, + "uuid": FAKE_UUID, + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + "progress": 0, + "name": "test_server", + "status": "BUILD", + "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "accessIPv4": "1.2.3.4", + "accessIPv6": "fead::1234", + "image": { + "id": "5", + "links": [ + { + "rel": "bookmark", + "href": self.IMAGE_BOOKMARK, + }, + ], + }, + "flavor": { + "id": "1", + "links": [ + { + "rel": "bookmark", + "href": self.FLAVOR_BOOKMARK, + }, + ], + }, + "addresses": { + "network_one": [ + { + "version": 4, + "addr": "67.23.10.138", + }, + { + "version": 6, + "addr": "::babe:67.23.10.138", + }, + ], + "network_two": [ + { + "version": 4, + "addr": "67.23.10.139", + }, + { + "version": 6, + "addr": "::babe:67.23.10.139", + }, + ], + }, + "metadata": { + "Open": "Stack", + "Number": "1", + }, + 'links': [ + { + 'href': self.SERVER_HREF, + 'rel': 'self', + }, + { + 'href': self.SERVER_BOOKMARK, + 'rel': 'bookmark', + }, + ], + } + } + + output = serializer.serialize(fixture, 'show') + print output + has_dec = output.startswith("") + self.assertTrue(has_dec) + def test_show(self): serializer = servers.ServerXMLSerializer() -- cgit From 73311c4e71e72f2866bb47063ddf9b5a04c3736d Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 16:36:41 -0400 Subject: pep8 --- nova/api/openstack/common.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 7dde68975..1d1c56459 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -286,6 +286,7 @@ class MetadataHeadersSerializer(wsgi.ResponseHeadersSerializer): class MetadataXMLSerializer(wsgi.XMLDictSerializer): NSMAP = {None: xmlutil.XMLNS_V11} + def __init__(self, xmlns=wsgi.XMLNS_V11): super(MetadataXMLSerializer, self).__init__(xmlns=xmlns) -- cgit From 358c346a7d01336f23b21a18f33c3213b3672f0a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 25 Aug 2011 15:53:13 -0500 Subject: updated libvirt tests to use fake_network_info --- nova/network/manager.py | 2 +- nova/tests/fake_network_info.py | 30 ++++++++++++++++++++----- nova/tests/test_libvirt.py | 49 +++++++++++++++++++++++------------------ 3 files changed, 53 insertions(+), 28 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index dcd5aad9a..9a4b09ad8 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -483,7 +483,6 @@ class NetworkManager(manager.SchedulerDependentManager): 'bridge': network['bridge'], 'id': network['id'], 'cidr': network['cidr'], - 'cidr_v6': network['cidr_v6'], 'injected': network['injected'], 'vlan': network['vlan'], 'bridge_interface': network['bridge_interface'], @@ -509,6 +508,7 @@ class NetworkManager(manager.SchedulerDependentManager): if network['cidr_v6']: info['ip6s'] = [ip6_dict()] + 
network_dict['cidr_v6'] = network['cidr_v6'], # TODO(tr3buchet): handle ip6 routes here as well if network['gateway_v6']: info['gateway6'] = network['gateway_v6'] diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 072585195..0bf117a90 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -16,11 +16,13 @@ # under the License. from nova import db +from nova import flags from nova import test from nova.network import manager as network_manager HOST = "testhost" +FLAGS = flags.FLAGS class FakeModel(dict): @@ -32,15 +34,14 @@ class FakeModel(dict): return self[name] -def fake_network(n): - return {'id': n, +def fake_network(n, ipv6=None): + if ipv6 = None: + ipv6 = Flags.use_ipv6 + rval = {'id': n, 'label': 'test%d' % n, 'injected': False, 'multi_host': False, 'cidr': '192.168.%d.0/24' % n, - 'cidr_v6': '2001:db8:0:%x::/64' % n, - 'gateway_v6': '2001:db8:0:%x::1' % n, - 'netmask_v6': '64', 'netmask': '255.255.255.0', 'bridge': 'fake_br%d' % n, 'bridge_interface': 'fake_eth%d' % n, @@ -52,6 +53,10 @@ def fake_network(n): 'host': None, 'project_id': 'fake_project', 'vpn_public_address': '192.168.%d.2' % n} + if ipv6: + rval['cidr_v6'] = '2001:db8:0:%x::/64' % n + rval['gateway_v6'] = '2001:db8:0:%x::1' % n + rval['netmask_v6'] = '64' def fixed_ips(num_networks, num_ips, num_floating_ips=0): @@ -91,7 +96,22 @@ def vifs(n): 'instance_id': 0} +def ipv4_like(ip, s): + ip = ip.split('.') + s = s.split('.') + + for i, octet in enumerate(s): + if octet == '*': + continue + if octet != ip[i]: + return False + return True + + def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): + # stubs is the self.stubs from the test + # ips_per_vif is the number of ips each vif will have + # num_floating_ips is number of float ips for each fixed ip network = network_manager.FlatManager(host=HOST) network.db = db diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 6a213b4f0..1b5a2d55d 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -36,6 +36,7 @@ from nova.api.ec2 import cloud from nova.compute import power_state from nova.virt.libvirt import connection from nova.virt.libvirt import firewall +from nova.tests import fake_network_info libvirt = None FLAGS = flags.FLAGS @@ -45,7 +46,8 @@ def _concurrency(wait, done, target): wait.wait() done.send() - +_fake_network_info = fake_network_info.fake_get_instance_nw_info +_ipv4_like = fake_network_info.ipv4_like def _create_network_info(count=1, ipv6=None): if ipv6 is None: ipv6 = FLAGS.use_ipv6 @@ -276,12 +278,12 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = db.instance_create(self.context, self.test_instance) result = conn._prepare_xml_info(instance_ref, - _create_network_info(), + _fake_network_info(self.stubs, 1), False) self.assertTrue(len(result['nics']) == 1) result = conn._prepare_xml_info(instance_ref, - _create_network_info(2), + _fake_network_info(self.stubs, 2), False) self.assertTrue(len(result['nics']) == 2) @@ -406,7 +408,7 @@ class LibvirtConnTestCase(test.TestCase): def test_multi_nic(self): instance_data = dict(self.test_instance) - network_info = _create_network_info(2) + network_info = _fake_network_info(self.stubs, 2) conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, instance_data) xml = conn.to_xml(instance_ref, network_info, False) @@ -416,9 +418,9 @@ class LibvirtConnTestCase(test.TestCase): parameters = interfaces[0].findall('./filterref/parameter') 
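To make the wildcard matching used by the assertions that follow concrete, a small usage sketch of the ipv4_like helper added to fake_network_info above; the helper body is copied from the patch and the sample addresses are invented:

def ipv4_like(ip, s):
    ip = ip.split('.')
    s = s.split('.')
    for i, octet in enumerate(s):
        if octet == '*':
            continue
        if octet != ip[i]:
            return False
    return True

# '*' matches any octet, and comparison only covers the octets the
# pattern supplies, so a short prefix such as '192.168' also matches.
assert ipv4_like('192.168.0.1', '192.168.*.1')
assert ipv4_like('192.168.5.100', '192.168')
assert not ipv4_like('10.0.0.1', '192.168.*.1')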
self.assertEquals(interfaces[0].get('type'), 'bridge') self.assertEquals(parameters[0].get('name'), 'IP') - self.assertEquals(parameters[0].get('value'), '10.11.12.13') + self.assertTrue(_ipv4_like(parameters[0].get('value'), '192.168') self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') - self.assertEquals(parameters[1].get('value'), '10.0.0.1') + self.assertTrue(_ipv4_like(parameters[1].get('value'), '192.168.*.1') def _check_xml_and_container(self, instance): user_context = context.RequestContext(self.user_id, @@ -432,7 +434,7 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) xml = conn.to_xml(instance_ref, network_info) tree = xml_to_tree(xml) @@ -503,9 +505,11 @@ class LibvirtConnTestCase(test.TestCase): common_checks = [ (lambda t: t.find('.').tag, 'domain'), (lambda t: t.find(parameter).get('name'), 'IP'), - (lambda t: t.find(parameter).get('value'), '10.11.12.13'), + (lambda t: _ipv4_like(t.find(parameter).get('value'), '192.168'), + True), (lambda t: t.findall(parameter)[1].get('name'), 'DHCPSERVER'), - (lambda t: t.findall(parameter)[1].get('value'), '10.0.0.1'), + (lambda t: _ipv4_like(t.findall(parameter)[1].get('value'), + '192.168.*.1'), True), (lambda t: t.find('./devices/serial/source').get( 'path').split('/')[1], 'console.log'), (lambda t: t.find('./memory').text, '2097152')] @@ -530,7 +534,7 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, expected_uri) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) xml = conn.to_xml(instance_ref, network_info, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): @@ -645,7 +649,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() instance_ref = db.instance_create(self.context, self.test_instance) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) # Start test self.mox.ReplayAll() @@ -828,7 +832,7 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) try: conn.spawn(self.context, instance, network_info) @@ -1062,7 +1066,7 @@ class IptablesFirewallTestCase(test.TestCase): from nova.network import linux_net linux_net.iptables_manager.execute = fake_iptables_execute - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) @@ -1076,7 +1080,8 @@ class IptablesFirewallTestCase(test.TestCase): instance_chain = None for rule in self.out_rules: # This is pretty crude, but it'll do for now - if '-d 10.11.12.13 -j' in rule: + # last two octets change + if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule): instance_chain = rule.split(' ')[-1] break self.assertTrue(instance_chain, "The instance chain wasn't added") @@ -1112,14 +1117,14 @@ class IptablesFirewallTestCase(test.TestCase): def test_filters_for_instance_with_ip_v6(self): self.flags(use_ipv6=True) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) self.assertEquals(len(rulesv4), 2) 
self.assertEquals(len(rulesv6), 3) def test_filters_for_instance_without_ip_v6(self): self.flags(use_ipv6=False) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) self.assertEquals(len(rulesv4), 2) self.assertEquals(len(rulesv6), 0) @@ -1129,7 +1134,7 @@ class IptablesFirewallTestCase(test.TestCase): ipv6_rules_per_network = 3 networks_count = 5 instance_ref = self._create_instance_ref() - network_info = _create_network_info(networks_count) + network_info = _fake_network_info(self.stubs, networks_count) ipv4_len = len(self.fw.iptables.ipv4['filter'].rules) ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, @@ -1169,7 +1174,7 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) self.fw.setup_basic_filtering(instance_ref, network_info) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) @@ -1190,7 +1195,7 @@ class IptablesFirewallTestCase(test.TestCase): # create a firewall via setup_basic_filtering like libvirt_conn.spawn # should have a chain with 0 rules - network_info = _create_network_info(1) + network_info = _fake_network_info(self.stubs, 1) self.fw.setup_basic_filtering(instance_ref, network_info) self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains) rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules @@ -1390,7 +1395,7 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) self.fw.setup_basic_filtering(instance, network_info) self.fw.prepare_instance_filter(instance, network_info) self.fw.apply_instance_filter(instance, network_info) @@ -1400,7 +1405,7 @@ class NWFilterTestCase(test.TestCase): def test_create_network_filters(self): instance_ref = self._create_instance() - network_info = _create_network_info(3) + network_info = _fake_network_info(self.stubs, 3) result = self.fw._create_network_filters(instance_ref, network_info, "fake") @@ -1424,7 +1429,7 @@ class NWFilterTestCase(test.TestCase): instance = db.instance_get(self.context, inst_id) _setup_networking(instance_ref['id'], self.test_ip) - network_info = _create_network_info() + network_info = _fake_network_info(self.stubs, 1) self.fw.setup_basic_filtering(instance, network_info) self.fw.prepare_instance_filter(instance, network_info) self.fw.apply_instance_filter(instance, network_info) -- cgit From 86029908254850e9d04fcc2399eef54e7af58193 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 16:56:21 -0400 Subject: updated tests --- nova/tests/api/openstack/test_servers.py | 46 +++----------------------------- nova/tests/integrated/test_xml.py | 7 ++--- 2 files changed, 8 insertions(+), 45 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index d80846747..87ae19f56 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -428,48 +428,10 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.1/fake/servers/1') req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) - actual = 
minidom.parseString(res.body.replace(' ', '')) - expected_uuid = FAKE_UUID - expected_updated = "2010-11-11T11:00:00Z" - expected_created = "2010-10-10T12:00:00Z" - expected = minidom.parseString(""" - - - - - - - - - - - - 1 - - - - - - - - - - - - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) + output = res.body + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'server') def test_get_server_with_active_status_by_id_v1_1(self): image_bookmark = "http://localhost/fake/images/10" diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py index 74baaacc2..6b4f4c0ec 100644 --- a/nova/tests/integrated/test_xml.py +++ b/nova/tests/integrated/test_xml.py @@ -34,8 +34,8 @@ class XmlTests(integrated_helpers._IntegratedTestBase): response = self.api.api_request('/limits', headers=headers) data = response.read() LOG.debug("data: %s" % data) - - prefix = ' Date: Thu, 25 Aug 2011 16:02:52 -0500 Subject: fixed a couple of syntax errors --- nova/tests/fake_network_info.py | 2 +- nova/tests/test_libvirt.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 0bf117a90..113647d7f 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -35,7 +35,7 @@ class FakeModel(dict): def fake_network(n, ipv6=None): - if ipv6 = None: + if ipv6 == None: ipv6 = Flags.use_ipv6 rval = {'id': n, 'label': 'test%d' % n, diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 1b5a2d55d..06ad7d13a 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -418,9 +418,9 @@ class LibvirtConnTestCase(test.TestCase): parameters = interfaces[0].findall('./filterref/parameter') self.assertEquals(interfaces[0].get('type'), 'bridge') self.assertEquals(parameters[0].get('name'), 'IP') - self.assertTrue(_ipv4_like(parameters[0].get('value'), '192.168') + self.assertTrue(_ipv4_like(parameters[0].get('value'), '192.168')) self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') - self.assertTrue(_ipv4_like(parameters[1].get('value'), '192.168.*.1') + self.assertTrue(_ipv4_like(parameters[1].get('value'), '192.168.*.1')) def _check_xml_and_container(self, instance): user_context = context.RequestContext(self.user_id, -- cgit From 5daa66cb91f95ff341ce5ec762fb1060f22e587f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 25 Aug 2011 16:10:25 -0500 Subject: typo --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 113647d7f..295ac8a97 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -36,7 +36,7 @@ class FakeModel(dict): def fake_network(n, ipv6=None): if ipv6 == None: - ipv6 = Flags.use_ipv6 + ipv6 = FLAGS.use_ipv6 rval = {'id': n, 'label': 'test%d' % n, 'injected': False, -- cgit From 75ef563099133efd47a97c7e989d0188a054eebc Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 17:10:35 -0400 Subject: Fixed integrated.test_xml to be more robust --- nova/tests/integrated/test_xml.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py index 6b4f4c0ec..cf013da1d 100644 --- a/nova/tests/integrated/test_xml.py +++ b/nova/tests/integrated/test_xml.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and 
limitations # under the License. +from lxml import etree + from nova.log import logging from nova.tests.integrated import integrated_helpers from nova.api.openstack import common @@ -34,9 +36,8 @@ class XmlTests(integrated_helpers._IntegratedTestBase): response = self.api.api_request('/limits', headers=headers) data = response.read() LOG.debug("data: %s" % data) - declaration = "" - prefix = '%s Date: Thu, 25 Aug 2011 16:49:09 -0500 Subject: forget a return --- nova/tests/fake_network_info.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 295ac8a97..b05cf7c95 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -58,6 +58,8 @@ def fake_network(n, ipv6=None): rval['gateway_v6'] = '2001:db8:0:%x::1' % n rval['netmask_v6'] = '64' + return rval + def fixed_ips(num_networks, num_ips, num_floating_ips=0): for network in xrange(num_networks): -- cgit From 1e6eed6b064632996b56ba6952b52c07e28c114c Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 18:01:35 -0400 Subject: Updated limits serialization tests to use etree and added limits schema --- nova/api/openstack/schemas/v1.1/limits.rng | 28 +++++++++++++++ nova/tests/api/openstack/test_limits.py | 56 ++++++++++++++++-------------- 2 files changed, 58 insertions(+), 26 deletions(-) create mode 100644 nova/api/openstack/schemas/v1.1/limits.rng diff --git a/nova/api/openstack/schemas/v1.1/limits.rng b/nova/api/openstack/schemas/v1.1/limits.rng new file mode 100644 index 000000000..1af8108ec --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/limits.rng @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 801b06230..a3bcecc3a 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -19,6 +19,7 @@ Tests dealing with HTTP rate-limiting. 
import httplib import json +from lxml import etree import StringIO import stubout import time @@ -29,6 +30,7 @@ from xml.dom import minidom import nova.context from nova.api.openstack import limits from nova.api.openstack import views +from nova.api.openstack import xmlutil from nova import test @@ -39,6 +41,8 @@ TEST_LIMITS = [ limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), limits.Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE), ] +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" class BaseLimitTestSuite(unittest.TestCase): @@ -1006,32 +1010,32 @@ class LimitsXMLSerializationTest(test.TestCase): "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - - - - - - - - - - - - - - - - - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') + + #verify absolute limits + absolute = root.find('{0}absolute'.format(NS)) + absolutes = absolute.findall('limit'.format(NS)) + for limit in absolutes: + name = limit.get('name') + value = limit.get('value') + self.assertEqual(value, str(fixture['limits']['absolute'][name])) + + #verify rate limits + rate_root = root.find('{0}rates'.format(NS)) + rates = rate_root.findall('{0}rate'.format(NS)) + for i in range(len(rates)): + rate = rates[i] + for key in ['uri', 'regex']: + self.assertEqual(rate.get(key), str(fixture['limits']['rate'][i][key])) + rate_limits = rate.findall('{0}limit'.format(NS)) + for z in range(len(rate_limits)): + limit = rate_limits[z] + for key in ['verb', 'value', 'remaining', 'unit', + 'next-available']: + self.assertEqual(limit.get(key), + str(fixture['limits']['rate'][i]['limit'][z][key])) def test_index_no_limits(self): serializer = limits.LimitsXMLSerializer() -- cgit From 9df1fe10f39d4b1b4753a5c5366e68dd1efa9e77 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 25 Aug 2011 17:04:26 -0500 Subject: altered fake network model --- nova/tests/fake_network_info.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index b05cf7c95..47f9abd05 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -42,10 +42,13 @@ def fake_network(n, ipv6=None): 'injected': False, 'multi_host': False, 'cidr': '192.168.%d.0/24' % n, + 'cidr_v6': None, 'netmask': '255.255.255.0', + 'netmask_v6': None, 'bridge': 'fake_br%d' % n, 'bridge_interface': 'fake_eth%d' % n, 'gateway': '192.168.%d.1' % n, + 'gateway_v6': None, 'broadcast': '192.168.%d.255' % n, 'dns1': '192.168.%d.3' % n, 'dns2': '192.168.%d.4' % n, -- cgit From bebc02efbf4f049efeb4e1f72a21a8fdc825903a Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 18:08:46 -0400 Subject: pep8 --- nova/tests/api/openstack/test_limits.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index a3bcecc3a..888f62697 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -1028,14 +1028,15 @@ class LimitsXMLSerializationTest(test.TestCase): for i in range(len(rates)): rate = rates[i] for key in ['uri', 'regex']: - self.assertEqual(rate.get(key), str(fixture['limits']['rate'][i][key])) + self.assertEqual(rate.get(key), + str(fixture['limits']['rate'][i][key])) rate_limits = 
rate.findall('{0}limit'.format(NS)) for z in range(len(rate_limits)): limit = rate_limits[z] for key in ['verb', 'value', 'remaining', 'unit', 'next-available']: self.assertEqual(limit.get(key), - str(fixture['limits']['rate'][i]['limit'][z][key])) + str(fixture['limits']['rate'][i]['limit'][z][key])) def test_index_no_limits(self): serializer = limits.LimitsXMLSerializer() -- cgit From bcdec7da59ade484d370fb4a605e4f6926038252 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 18:10:43 -0400 Subject: updated additional limits test --- nova/tests/api/openstack/test_limits.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 888f62697..250d97101 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -1046,13 +1046,16 @@ class LimitsXMLSerializationTest(test.TestCase): "absolute": {}}} output = serializer.serialize(fixture, 'index') - actual = minidom.parseString(output.replace(" ", "")) + print output + root = etree.XML(output) + xmlutil.validate_schema(root, 'limits') - expected = minidom.parseString(""" - - - - - """.replace(" ", "")) + #verify absolute limits + absolute = root.find('{0}absolute'.format(NS)) + absolutes = absolute.findall('limit'.format(NS)) + self.assertEqual(len(absolutes), 0) - self.assertEqual(expected.toxml(), actual.toxml()) + #verify rate limits + rate_root = root.find('{0}rates'.format(NS)) + rates = rate_root.findall('{0}rate'.format(NS)) + self.assertEqual(len(rates), 0) -- cgit From e4966cc21ca34380be98a9f24c76404ca43f663f Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 18:31:41 -0400 Subject: updated LimitsXMLSerializer to use etree and supply the xml declaration --- nova/api/openstack/limits.py | 81 ++++++++++++++++----------------- nova/tests/api/openstack/test_limits.py | 12 +++++ 2 files changed, 52 insertions(+), 41 deletions(-) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 86afa3b62..5ee9a05b0 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -20,6 +20,7 @@ Module dedicated functions/classes dealing with rate limiting requests. import copy import httplib import json +from lxml import etree import math import re import time @@ -38,6 +39,7 @@ from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import limits as limits_views from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil # Convenience constants for the limits dictionary passed to Limiter(). 
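As background for the serializer rewrite in the next hunk: the shared _to_xml() helper, moved into wsgi.XMLDictSerializer earlier in this series, calls etree.tostring() with xml_declaration=True, which is what the startswith() checks in the tests above look for. A minimal sketch of that behaviour, with the element name chosen only for illustration:

from lxml import etree

root = etree.Element('limits')
output = etree.tostring(root, encoding='UTF-8', xml_declaration=True)
# lxml prepends a declaration of the form <?xml version='1.0' encoding='UTF-8'?>
print output.startswith("<?xml")   # True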
@@ -81,52 +83,49 @@ class LimitsXMLSerializer(wsgi.XMLDictSerializer): xmlns = wsgi.XMLNS_V11 + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def __init__(self): pass - def _create_rates_node(self, xml_doc, rates): - rates_node = xml_doc.createElement('rates') - for rate in rates: - rate_node = xml_doc.createElement('rate') - rate_node.setAttribute('uri', rate['uri']) - rate_node.setAttribute('regex', rate['regex']) - - for limit in rate['limit']: - limit_node = xml_doc.createElement('limit') - limit_node.setAttribute('value', str(limit['value'])) - limit_node.setAttribute('verb', limit['verb']) - limit_node.setAttribute('remaining', str(limit['remaining'])) - limit_node.setAttribute('unit', limit['unit']) - limit_node.setAttribute('next-available', - str(limit['next-available'])) - rate_node.appendChild(limit_node) - - rates_node.appendChild(rate_node) - return rates_node - - def _create_absolute_node(self, xml_doc, absolutes): - absolute_node = xml_doc.createElement('absolute') - for key, value in absolutes.iteritems(): - limit_node = xml_doc.createElement('limit') - limit_node.setAttribute('name', key) - limit_node.setAttribute('value', str(value)) - absolute_node.appendChild(limit_node) - return absolute_node - - def _limits_to_xml(self, xml_doc, limits): - limits_node = xml_doc.createElement('limits') - rates_node = self._create_rates_node(xml_doc, limits['rate']) - limits_node.appendChild(rates_node) - - absolute_node = self._create_absolute_node(xml_doc, limits['absolute']) - limits_node.appendChild(absolute_node) - - return limits_node + def _create_rates_node(self, rates_dict): + rates_elem = etree.Element('rates', nsmap=self.NSMAP) + for rate in rates_dict.items(): + rate_node = etree.SubElement(rates_elem, 'rate') + rate_node.set('uri', rate['uri']) + rate_node.set('regex', rate['regex']) + for limit in rate['limits']: + limit_elem = etree.SubElement(rate_node, 'limit') + limit_elem.set('value', str(rate['value'])) + limit_elem.set('verb', str(rate['verb'])) + limit_elem.set('remaining', str(rate['remaining'])) + limit_elem.set('unit', str(rate['unit'])) + limit_elem.set('next-available', str(rate['next-available'])) + return rates_elem + + def _create_absolute_node(self, absolute_dict): + absolute_elem = etree.Element('absolute', nsmap=self.NSMAP) + for key, value in absolute_dict.items(): + limit_elem = etree.SubElement(rate_node, 'limit') + limit_elem.set('name', str(key)) + limit_elem.set('value', str(value)) + return absolute_elem + + def _populate_limits(self, limits_elem, limits_dict): + """Populate a limits xml element from a dict.""" + + rates_elem = self._create_rates_node( + limits_dict.get('rates', {})) + limits_elem.append(rates_elem) + + absolutes_elem = self._create_absolute_node( + limits_dict.get('absolutes', {})) + limits_elem.append(absolutes_elem) def index(self, limits_dict): - xml_doc = minidom.Document() - node = self._limits_to_xml(xml_doc, limits_dict['limits']) - return self.to_xml_string(node, False) + limits = etree.Element('limits', nsmap=self.NSMAP) + self._populate_limits(limits, limits_dict) + return self._to_xml(limits) def create_resource(version='1.0'): diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 250d97101..f71d9c454 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -984,6 +984,18 @@ class LimitsXMLSerializationTest(test.TestCase): def tearDown(self): pass + def test_xml_declaration(self): + serializer = 
limits.LimitsXMLSerializer() + + fixture = {"limits": { + "rate": [], + "absolute": {}}} + + output = serializer.serialize(fixture, 'index') + print output + has_dec = output.startswith("") + self.assertTrue(has_dec) + def test_index(self): serializer = limits.LimitsXMLSerializer() fixture = {"limits": { -- cgit From 2e7447a9ca180b68b49984d57e224e7fdd6d27e2 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Thu, 25 Aug 2011 17:13:09 -0700 Subject: Minor fixes --- nova/network/quantum/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index f712a93c4..f46eaf2f3 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -59,7 +59,7 @@ class QuantumManager(manager.FlatManager): FLAGS.quantum_default_tenant_id quantum_net_id = bridge if quantum_net_id: - if not q_conn.network_exists(q_tenant_id, quantum_net_id): + if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): raise Exception("Unable to find existing quantum " \ " network for tenant '%s' with net-id '%s'" % \ (q_tenant_id, quantum_net_id)) @@ -105,7 +105,7 @@ class QuantumManager(manager.FlatManager): # Create a port via quantum and attach the vif for (net_id, project_id) in net_proj_pairs: vif_rec = manager.FlatManager.add_virtual_interface(self, - context, instance, None) + context, instance_id, None) q_tenant_id = project_id or FLAGS.quantum_default_tenant_id self.q_conn.create_and_attach_port(q_tenant_id, net_id, -- cgit From 39169914aa43a911735267577e60bc977bcd5117 Mon Sep 17 00:00:00 2001 From: Alex Meade Date: Thu, 25 Aug 2011 21:50:18 -0400 Subject: Updated server and image XML serializers to take advantage of the addresses and metadata serializers --- nova/api/openstack/common.py | 9 ++++----- nova/api/openstack/images.py | 10 +++++----- nova/api/openstack/ips.py | 8 ++++---- nova/api/openstack/servers.py | 20 ++++++++------------ 4 files changed, 21 insertions(+), 26 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 1d1c56459..c3379cd11 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -290,7 +290,7 @@ class MetadataXMLSerializer(wsgi.XMLDictSerializer): def __init__(self, xmlns=wsgi.XMLNS_V11): super(MetadataXMLSerializer, self).__init__(xmlns=xmlns) - def _populate_metadata(self, metadata_elem, meta_dict): + def populate_metadata(self, metadata_elem, meta_dict): for (key, value) in meta_dict.items(): elem = etree.SubElement(metadata_elem, 'meta') elem.set('key', str(key)) @@ -301,21 +301,20 @@ class MetadataXMLSerializer(wsgi.XMLDictSerializer): (key, value) = meta_item_dict.items()[0] meta_elem.set('key', str(key)) meta_elem.text = value - return meta_elem def index(self, metadata_dict): metadata = etree.Element('metadata', nsmap=self.NSMAP) - self._populate_metadata(metadata, metadata_dict.get('metadata', {})) + self.populate_metadata(metadata, metadata_dict.get('metadata', {})) return self._to_xml(metadata) def create(self, metadata_dict): metadata = etree.Element('metadata', nsmap=self.NSMAP) - self._populate_metadata(metadata, metadata_dict.get('metadata', {})) + self.populate_metadata(metadata, metadata_dict.get('metadata', {})) return self._to_xml(metadata) def update_all(self, metadata_dict): metadata = etree.Element('metadata', nsmap=self.NSMAP) - self._populate_metadata(metadata, metadata_dict.get('metadata', {})) + self.populate_metadata(metadata, metadata_dict.get('metadata', {})) return self._to_xml(metadata) def 
show(self, meta_item_dict): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 48c53d6d5..893674f21 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -209,13 +209,13 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer): NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def __init__(self): + self.metadata_serializer = common.MetadataXMLSerializer() + def _create_metadata_node(self, metadata_dict): metadata_elem = etree.Element('metadata', nsmap=self.NSMAP) - for (key, value) in metadata_dict.items(): - elem = etree.SubElement(metadata_elem, 'meta') - elem.set('key', key) - elem.text = value - + self.metadata_serializer.populate_metadata(metadata_elem, + metadata_dict) return metadata_elem def _create_server_node(self, server_dict): diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index d5a715dda..7e644ba04 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -109,12 +109,10 @@ class IPXMLSerializer(wsgi.XMLDictSerializer): def __init__(self, xmlns=wsgi.XMLNS_V11): super(IPXMLSerializer, self).__init__(xmlns=xmlns) - def _create_addresses_node(self, addresses_dict): - addresses_elem = etree.Element('addresses', nsmap=self.NSMAP) + def populate_addresses_node(self, addresses_elem, addresses_dict): for (network_id, ip_dicts) in addresses_dict.items(): network_elem = self._create_network_node(network_id, ip_dicts) addresses_elem.append(network_elem) - return addresses_elem def _create_network_node(self, network_id, ip_dicts): network_elem = etree.Element('network', nsmap=self.NSMAP) @@ -131,7 +129,9 @@ class IPXMLSerializer(wsgi.XMLDictSerializer): return self._to_xml(network) def index(self, addresses_dict): - addresses = self._create_addresses_node(addresses_dict['addresses']) + addresses = etree.Element('addresses', nsmap=self.NSMAP) + self.populate_addresses_node(addresses, + addresses_dict.get('addresses', {})) return self._to_xml(addresses) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index fa5b7c023..bb403845d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -838,13 +838,14 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def __init__(self): + self.metadata_serializer = common.MetadataXMLSerializer() + self.addresses_serializer = ips.IPXMLSerializer() + def _create_metadata_node(self, metadata_dict): metadata_elem = etree.Element('metadata', nsmap=self.NSMAP) - for (key, value) in metadata_dict.items(): - elem = etree.SubElement(metadata_elem, 'meta') - elem.set('key', key) - elem.text = value - + self.metadata_serializer.populate_metadata(metadata_elem, + metadata_dict) return metadata_elem def _create_image_node(self, image_dict): @@ -869,13 +870,8 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): def _create_addresses_node(self, addresses_dict): addresses_elem = etree.Element('addresses', nsmap=self.NSMAP) - for (network_id, ip_dicts) in addresses_dict.items(): - network_node = etree.SubElement(addresses_elem, 'network') - network_node.set('id', str(network_id)) - for ip_dict in ip_dicts: - ip_elem = etree.SubElement(network_node, 'ip') - ip_elem.set('version', str(ip_dict['version'])) - ip_elem.set('addr', ip_dict['addr']) + self.addresses_serializer.populate_addresses_node(addresses_elem, + addresses_dict) return addresses_elem def _populate_server(self, server_elem, server_dict, detailed=False): -- cgit From 
5823a72690155d9d69e4d23a81be2ea0945809dc Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 25 Aug 2011 21:09:15 -0700 Subject: add priority for static networks --- nova/network/quantum/fake.py | 13 ++++++----- nova/network/quantum/manager.py | 9 ++++---- nova/network/quantum/melange_ipam_lib.py | 30 ++++++++++++++++++++++--- nova/network/quantum/nova_ipam_lib.py | 22 ++++++++++-------- nova/tests/test_quantum.py | 38 ++++++++++++++------------------ 5 files changed, 69 insertions(+), 43 deletions(-) diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py index ff2b1e9d5..a923bbf1a 100644 --- a/nova/network/quantum/fake.py +++ b/nova/network/quantum/fake.py @@ -102,8 +102,8 @@ class FakeQuantumIPAMLib(): self.subnets = {} def create_subnet(self, context, label, tenant_id, quantum_net_id, - cidr=None, gateway_v6=None, cidr_v6=None, - dns1=None, dns2=None): + priority, cidr=None, gateway_v6=None, + cidr_v6=None, dns1=None, dns2=None): if int(cidr.split("/")[1]) != 24: raise Exception("fake ipam_lib only supports /24s") v4_ips = [] @@ -116,6 +116,7 @@ class FakeQuantumIPAMLib(): "instance_id": None}) self.subnets[quantum_net_id] = {\ "label": label, + "priority": priority, "gateway": net_start + "1", "netmask": "255.255.255.0", "broadcast": net_start + "255", @@ -138,12 +139,14 @@ class FakeQuantumIPAMLib(): del self.subnets[net_id] def get_project_and_global_net_ids(self, context, project_id): - net_ids = [] + net_list = [] + id_priority_map = {} for nid, s in self.subnets.items(): if s['project_id'] == project_id or \ s['project_id'] == None: - net_ids.append((nid, s['project_id'])) - return net_ids + net_list.append((nid, s['project_id'])) + id_priority_map[nid] = s['priority'] + return sorted(net_list, key=lambda x: id_priority_map[x[0]]) def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): subnet = self.subnets[quantum_net_id] diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index f712a93c4..2c2fd4dd7 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -59,7 +59,7 @@ class QuantumManager(manager.FlatManager): FLAGS.quantum_default_tenant_id quantum_net_id = bridge if quantum_net_id: - if not q_conn.network_exists(q_tenant_id, quantum_net_id): + if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): raise Exception("Unable to find existing quantum " \ " network for tenant '%s' with net-id '%s'" % \ (q_tenant_id, quantum_net_id)) @@ -68,8 +68,9 @@ class QuantumManager(manager.FlatManager): quantum_net_id = self.q_conn.create_network(q_tenant_id, label) ipam_tenant_id = kwargs.get("project_id", None) + priority = kwargs.get("priority", 0) self.ipam.create_subnet(context, label, ipam_tenant_id, quantum_net_id, - cidr, gateway_v6, cidr_v6, dns1, dns2) + priority, cidr, gateway_v6, cidr_v6, dns1, dns2) def delete_network(self, context, fixed_range): project_id = context.project_id @@ -105,7 +106,7 @@ class QuantumManager(manager.FlatManager): # Create a port via quantum and attach the vif for (net_id, project_id) in net_proj_pairs: vif_rec = manager.FlatManager.add_virtual_interface(self, - context, instance, None) + context, instance_id, None) q_tenant_id = project_id or FLAGS.quantum_default_tenant_id self.q_conn.create_and_attach_port(q_tenant_id, net_id, @@ -212,7 +213,7 @@ class QuantumManager(manager.FlatManager): self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id, net_id, vif_ref) - self.net_manager.db.virtual_interface_delete_by_instance(admin_context, + 
db.virtual_interface_delete_by_instance(admin_context, instance_id) self._do_trigger_security_group_members_refresh_for_instance( instance_id) diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index 46038c349..526be6327 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -36,7 +36,7 @@ class QuantumMelangeIPAMLib: self.m_conn = melange_connection.MelangeConnection() def create_subnet(self, context, label, project_id, - quantum_net_id, cidr=None, + quantum_net_id, priority, cidr=None, gateway_v6=None, cidr_v6=None, dns1=None, dns2=None): tenant_id = project_id or FLAGS.quantum_default_tenant_id @@ -49,6 +49,13 @@ class QuantumMelangeIPAMLib: project_id=tenant_id, dns1=dns1, dns2=dns2) + # create a entry in the network table just to store + # the priority order for this network + net = {"bridge": quantum_net_id, + "project_id": project_id, + "priority": priority} + network = self.db.network_create_safe(context, net) + def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref): tenant_id = project_id or FLAGS.quantum_default_tenant_id self.m_conn.allocate_ip(quantum_net_id, @@ -61,19 +68,26 @@ class QuantumMelangeIPAMLib: for b in all_blocks['ip_blocks']: if b['cidr'] == cidr: return b['network_id'] + raise Exception("No network found for cidr %s" % cidr) def delete_subnets_by_net_id(self, context, net_id, project_id): + admin_context = context.elevated() tenant_id = project_id or FLAGS.quantum_default_tenant_id all_blocks = self.m_conn.get_blocks(tenant_id) for b in all_blocks['ip_blocks']: if b['network_id'] == net_id: self.m_conn.delete_block(b['id'], tenant_id) + network = db.network_get_by_bridge(admin_context, net_id) + if network is not None: + db.network_delete_safe(context, network['id']) + # get all networks with this project_id, as well as all networks # where the project-id is not set (these are shared networks) def get_project_and_global_net_ids(self, context, project_id): + admin_context = context.elevated() id_proj_map = {} - if not project_id: + if project_id is None: raise Exception("get_project_and_global_net_ids must be called" \ " with a non-null project_id") tenant_id = project_id @@ -84,7 +98,17 @@ class QuantumMelangeIPAMLib: all_provider_blocks = self.m_conn.get_blocks(tenant_id) for b in all_provider_blocks['ip_blocks']: id_proj_map[b['network_id']] = tenant_id - return id_proj_map.items() + + id_priority_map = {} + network = db.network_get_by_bridge(admin_context, net_id) + for net_id, project_id in id_project_map.item(): + network = db.network_get_by_bridge(admin_context, net_id) + if network is None: + del id_proj_map[net_id] + else: + id_priority_map[net_id] = network['priority'] + return sorted(id_priority_map.items(), + key=lambda x: id_priority_map[x[0]]) # FIXME: there must be a more efficient way to do this, # talk to the melange folks diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index 0fc74fa49..a9a6dcb29 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -42,10 +42,9 @@ class QuantumNovaIPAMLib: self.net_manager = net_manager def create_subnet(self, context, label, tenant_id, - quantum_net_id, cidr=None, + quantum_net_id, priority, cidr=None, gateway_v6=None, cidr_v6=None, dns1=None, dns2=None): - print "creating subnet %s" % cidr admin_context = context.elevated() subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) 
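For concreteness, the subnet_size expression in the context line above works out as follows; the CIDR value here is invented for illustration:

import math

cidr = '192.168.1.0/24'                        # example only
prefix_len = int(cidr.split("/")[1])           # 24
subnet_size = int(math.pow(2, 32 - prefix_len))
print subnet_size                              # 256 addresses in a /24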
manager.FlatManager.create_networks(self.net_manager, @@ -53,9 +52,10 @@ class QuantumNovaIPAMLib: False, 1, subnet_size, cidr_v6, gateway_v6, quantum_net_id, None, dns1, dns2) - # now grab the network and update project_id + # now grab the network and update project_id + priority network = db.network_get_by_bridge(admin_context, quantum_net_id) - net = {"project_id": tenant_id} + net = {"project_id": tenant_id, + "priority": priority} db.network_update(admin_context, network['id'], net) def get_network_id_by_cidr(self, context, cidr, project_id): @@ -82,7 +82,13 @@ class QuantumNovaIPAMLib: admin_context = context.elevated() networks = db.project_get_networks(admin_context, project_id, False) networks.extend(db.project_get_networks(admin_context, None, False)) - return [(n['bridge'], n['project_id']) for n in networks] + id_priority_map = {} + net_list = [] + for n in networks: + net_id = n['bridge'] + net_list.append((net_id, n["project_id"])) + id_priority_map[net_id] = n['priority'] + return sorted(net_list, key=lambda x: id_priority_map[x[0]]) def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): admin_context = context.elevated() @@ -104,8 +110,7 @@ class QuantumNovaIPAMLib: 'broadcast': n['broadcast'], 'netmask': n['netmask'], 'dns1': n['dns1'], - 'dns2': n['dns2'] - } + 'dns2': n['dns2']} subnet_data_v6 = { 'network_id': n['bridge'], 'cidr': n['cidr_v6'], @@ -113,8 +118,7 @@ class QuantumNovaIPAMLib: 'broadcast': None, 'netmask': None, 'dns1': None, - 'dns2': None - } + 'dns2': None} return (subnet_data_v4, subnet_data_v6) def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 378beb6ed..80cab950e 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -41,7 +41,8 @@ networks = [{'label': 'project1-net1', 'vlan': None, 'host': None, 'vpn_public_address': None, - 'project_id': 'fake_project1'}, + 'project_id': 'fake_project1', + 'priority': 1}, {'label': 'project2-net1', 'injected': False, 'multi_host': False, @@ -59,7 +60,7 @@ networks = [{'label': 'project1-net1', 'vlan': None, 'host': None, 'project_id': 'fake_project2', - 'vpn_public_address': '192.168.1.2'}, + 'priority': 1}, {'label': "public", 'injected': False, 'multi_host': False, @@ -76,8 +77,8 @@ networks = [{'label': 'project1-net1', 'dns2': '10.0.0.2', 'vlan': None, 'host': None, - 'vpn_public_address': None, - 'project_id': None}, + 'project_id': None, + 'priority': 0}, {'label': "project2-net2", 'injected': False, 'multi_host': False, @@ -94,8 +95,8 @@ networks = [{'label': 'project1-net1', 'dns2': '9.0.0.2', 'vlan': None, 'host': None, - 'vpn_public_address': None, - 'project_id': "fake_project2"}] + 'project_id': "fake_project2", + 'priority': 2}] # this is a base class to be used by all other Quantum Test classes @@ -114,7 +115,8 @@ class QuantumTestCaseBase(object): num_networks=1, network_size=256, cidr_v6=n['cidr_v6'], gateway_v6=n['gateway_v6'], bridge=None, bridge_interface=None, dns1=n['dns1'], - dns2=n['dns2'], project_id=n['project_id']) + dns2=n['dns2'], project_id=n['project_id'], + priority=n['priority']) def _delete_nets(self): for n in networks: @@ -138,29 +140,21 @@ class QuantumTestCaseBase(object): # we don't know which order the NICs will be in until we # introduce the notion of priority # v4 cidr - self.assertTrue(nw_info[0][0]['cidr'].startswith("10.") or \ - nw_info[1][0]['cidr'].startswith("10.")) - self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or \ - 
nw_info[1][0]['cidr'].startswith("192.")) + self.assertTrue(nw_info[0][0]['cidr'].startswith("10.")) + self.assertTrue(nw_info[1][0]['cidr'].startswith("192.")) # v4 address - self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10.") or \ - nw_info[1][1]['ips'][0]['ip'].startswith("10.")) - self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or \ - nw_info[1][1]['ips'][0]['ip'].startswith("192.")) + self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10.")) + self.assertTrue(nw_info[1][1]['ips'][0]['ip'].startswith("192.")) # v6 cidr - self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:") or \ - nw_info[1][0]['cidr_v6'].startswith("2001:1dba:")) - self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db8:") or \ - nw_info[1][0]['cidr_v6'].startswith("2001:1db8:")) + self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:")) + self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:")) # v6 address self.assertTrue(\ - nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:") or \ - nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dba:")) + nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:")) self.assertTrue(\ - nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db8:") or \ nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:")) self.net_man.deallocate_for_instance(ctx, -- cgit From 16290908eaca73caa6b3f2ce36fb8add0b7d3615 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 25 Aug 2011 22:45:24 -0700 Subject: fix for quantum api changes, change nova-mange to have quantum_list command --- bin/nova-manage | 23 +++++++++++++++++------ nova/network/quantum/client.py | 2 +- nova/network/quantum/quantum_connection.py | 4 ++-- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 9819ef206..4628e93b2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -773,7 +773,7 @@ class NetworkCommands(object): def list(self): """List all created networks""" _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"\ - "\t%-15s\t%-15s\t-15s\t-15s" + "\t%-15s\t%-15s" print _fmt % (_('id'), _('IPv4'), _('IPv6'), @@ -782,9 +782,7 @@ class NetworkCommands(object): _('DNS2'), _('VlanID'), _('project'), - _("uuid"), - _('priority'), - _('bridge')) + _("uuid")) for network in db.network_get_all(context.get_admin_context()): print _fmt % (network.id, network.cidr, @@ -794,9 +792,22 @@ class NetworkCommands(object): network.dns2, network.vlan, network.project_id, - network.uuid, + network.uuid) + + def quantum_list(self): + """List all created networks with Quantum-relevant fields""" + _fmt = "%-32s\t%-10s\t%-10s\t%s , %s" + print _fmt % ( _('bridge / quantum-id'), + _('project'), + _('priority'), + _('cidr_v4'), + _('cidr_v6')) + for network in db.network_get_all(context.get_admin_context()): + print _fmt % (network.bridge, + network.project_id, network.priority, - network.bridge) + network.cidr, + network.cidr_v6) @args('--network', dest="fixed_range", metavar='', help='Network to delete') diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index a0c7dc6d8..b57294c55 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -63,7 +63,7 @@ class Client(object): """A base client class - derived from Glance.BaseClient""" - action_prefix = '/v0.1/tenants/{tenant_id}' + action_prefix = '/v1.0/tenants/{tenant_id}' """Action query strings""" networks_path = "/networks" diff --git a/nova/network/quantum/quantum_connection.py 
b/nova/network/quantum/quantum_connection.py index 3aa017bcd..d6f749bf2 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -46,9 +46,9 @@ class QuantumClientConnection: logger=LOG) def create_network(self, tenant_id, network_name): - data = {'network': {'net-name': network_name}} + data = {'network': {'name': network_name}} resdict = self.client.create_network(data, tenant=tenant_id) - return resdict["networks"]["network"]["id"] + return resdict["network"]["id"] def delete_network(self, tenant_id, net_id): self.client.delete_network(net_id, tenant=tenant_id) -- cgit From e8d02ac1b5e9a45cc19992d232d4148f9db720ca Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 25 Aug 2011 23:02:46 -0700 Subject: rearrange imports --- bin/nova-manage | 2 +- .../migrate_repo/versions/041_add_network_priority.py | 5 ++--- nova/network/quantum/fake.py | 5 +++-- nova/network/quantum/manager.py | 2 +- nova/network/quantum/melange_connection.py | 1 + nova/network/quantum/melange_ipam_lib.py | 1 + nova/network/quantum/nova_ipam_lib.py | 4 ++-- nova/network/quantum/quantum_connection.py | 10 +++++----- nova/tests/test_quantum.py | 2 +- 9 files changed, 17 insertions(+), 15 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 4628e93b2..3dac2963c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1,4 +1,4 @@ -#!/usr/bin/env python +!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the diff --git a/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py index e619b1fcd..e69380199 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py @@ -1,4 +1,4 @@ -# Copyright 2011 OpenStack LLC. +# Copyright 2011 Nicira, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,14 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. -import datetime - from sqlalchemy import * from migrate import * from nova import log as logging from nova import utils + meta = MetaData() # Add priority column to networks table diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py index a923bbf1a..3f6996e63 100644 --- a/nova/network/quantum/fake.py +++ b/nova/network/quantum/fake.py @@ -15,12 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. 
+import math +from netaddr import IPNetwork + from nova import exception from nova import ipv6 from nova import log as logging from nova import utils -import math -from netaddr import IPNetwork LOG = logging.getLogger("network.quantum.fake") diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 2c2fd4dd7..e49d0cfdc 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -20,10 +20,10 @@ from nova import exception from nova import flags from nova import log as logging from nova import manager -from nova import utils from nova.network import manager from nova.network.quantum import quantum_connection from nova.network.quantum import fake +from nova import utils LOG = logging.getLogger("quantum_manager") diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py index b3955138d..2d884fa60 100644 --- a/nova/network/quantum/melange_connection.py +++ b/nova/network/quantum/melange_connection.py @@ -19,6 +19,7 @@ import httplib import socket import urllib import json + from nova import flags diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index 526be6327..5b58520c2 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -21,6 +21,7 @@ from nova import flags from nova import log as logging from nova.network.quantum import melange_connection + LOG = logging.getLogger("quantum_melange_ipam") FLAGS = flags.FLAGS diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index a9a6dcb29..b72edeeff 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -17,15 +17,15 @@ import math -#from nova import context from nova import db from nova import exception from nova import flags from nova import ipv6 from nova import log as logging -from nova import utils from nova.network import manager from nova.network.quantum import melange_connection as melange +from nova import utils + LOG = logging.getLogger("quantum_nova_ipam_lib") diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index d6f749bf2..7a96d23fd 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -17,9 +17,9 @@ from nova import flags from nova import log as logging +from nova.network.quantum import client as quantum_client from nova import utils -from nova.network.quantum.client import Client LOG = logging.getLogger("nova.network.quantum") FLAGS = flags.FLAGS @@ -40,10 +40,10 @@ flags.DEFINE_string('quantum_default_tenant_id', class QuantumClientConnection: def __init__(self): - self.client = Client(FLAGS.quantum_connection_host, - FLAGS.quantum_connection_port, - format="json", - logger=LOG) + self.client = quantum_client.Client(FLAGS.quantum_connection_host, + FLAGS.quantum_connection_port, + format="json", + logger=LOG) def create_network(self, tenant_id, network_name): data = {'network': {'name': network_name}} diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 80cab950e..8ef22ac7e 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -19,8 +19,8 @@ from nova import context from nova import db from nova import exception from nova import log as logging -from nova import test from nova.network.quantum import manager as quantum_manager +from nova import test LOG = logging.getLogger('nova.tests.quantum_network') -- cgit From 
b66840327ad183619995bc9e88a0d4ea01ab0b59 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 25 Aug 2011 23:08:15 -0700 Subject: replace accidental deletion in nova-mange --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3dac2963c..4628e93b2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1,4 +1,4 @@ -!/usr/bin/env python +#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the -- cgit From 87a5fefe5b0e3379ef93fede0750ddd76dd3c20d Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 25 Aug 2011 23:18:38 -0700 Subject: Add brad to Authors file --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 864679929..112791cfc 100644 --- a/Authors +++ b/Authors @@ -11,6 +11,7 @@ Antony Messerli Armando Migliaccio Arvind Somya Bilal Akhtar +Brad Hall Brian Lamar Brian Schott Brian Waldon -- cgit From a7e7be67a79fab4348f68c5738656f1ac401c39a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 12:24:35 -0500 Subject: misplaced comma... --- nova/network/manager.py | 2 +- nova/tests/fake_network_info.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 9a4b09ad8..74e7bbf26 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -508,7 +508,7 @@ class NetworkManager(manager.SchedulerDependentManager): if network['cidr_v6']: info['ip6s'] = [ip6_dict()] - network_dict['cidr_v6'] = network['cidr_v6'], + network_dict['cidr_v6'] = network['cidr_v6'] # TODO(tr3buchet): handle ip6 routes here as well if network['gateway_v6']: info['gateway6'] = network['gateway_v6'] diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 47f9abd05..81edd998b 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -60,6 +60,8 @@ def fake_network(n, ipv6=None): rval['cidr_v6'] = '2001:db8:0:%x::/64' % n rval['gateway_v6'] = '2001:db8:0:%x::1' % n rval['netmask_v6'] = '64' + print 'asdf %s' % rval['cidr_v6'] + print type(rval['cidr_v6']) return rval -- cgit From 79602b6d9d7f3ef9777cc0e5f7a0476e1f71ffa0 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 12:26:09 -0500 Subject: forgot test print statements --- nova/tests/fake_network_info.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 81edd998b..47f9abd05 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -60,8 +60,6 @@ def fake_network(n, ipv6=None): rval['cidr_v6'] = '2001:db8:0:%x::/64' % n rval['gateway_v6'] = '2001:db8:0:%x::1' % n rval['netmask_v6'] = '64' - print 'asdf %s' % rval['cidr_v6'] - print type(rval['cidr_v6']) return rval -- cgit From 2d3c516f5943efac0ab836818a759ba9291910b4 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 12:28:14 -0500 Subject: added memory_mb to instance flavor test model --- nova/tests/fake_network_info.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 47f9abd05..6feb1e8f8 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -80,7 +80,8 @@ def fixed_ips(num_networks, num_ips, num_floating_ips=0): flavor = {'id': 0, - 'rxtx_cap': 3} + 'rxtx_cap': 3, + 'memory_mb': 512} def floating_ips(fixed_ip_id): for i in xrange(154): -- cgit 
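[Editor's note, not part of the patch series] The Quantum changes earlier in this series (fake.py, nova_ipam_lib.py, melange_ipam_lib.py, the nova-manage quantum_list command, and the 041_add_network_priority migration) all converge on one idea: each network row carries a new 'priority' value, and get_project_and_global_net_ids() returns (net_id, project_id) pairs sorted by that priority so a guest's NICs are attached in a deterministic order. The snippet below is a minimal illustrative sketch of that ordering logic, loosely modeled on the nova_ipam_lib version; the sample data and the standalone helper are invented for illustration and do not appear in any of the patches.

# Illustrative sketch only; names and data are invented, and the helper is
# written standalone rather than as a method on an IPAM lib class.
networks = [
    {'uuid': 'public-net', 'project_id': None, 'priority': 0},
    {'uuid': 'proj1-net1', 'project_id': 'fake_project1', 'priority': 1},
    {'uuid': 'proj1-net2', 'project_id': 'fake_project1', 'priority': 2},
]

def get_project_and_global_net_ids(networks, project_id):
    """Return (net_id, project_id) pairs, lowest priority first."""
    # keep networks owned by this project plus shared (project-less) networks
    matching = [n for n in networks
                if n['project_id'] == project_id or n['project_id'] is None]
    priority_map = dict((n['uuid'], n['priority']) for n in matching)
    net_list = [(n['uuid'], n['project_id']) for n in matching]
    return sorted(net_list, key=lambda pair: priority_map[pair[0]])

print(get_project_and_global_net_ids(networks, 'fake_project1'))
# -> [('public-net', None), ('proj1-net1', 'fake_project1'),
#     ('proj1-net2', 'fake_project1')]

Both IPAM backends keep the priority in the nova networks table (nova_ipam_lib via network_update, melange_ipam_lib via a minimal network_create_safe entry), which is why this ordering step can be shared even though the address data itself lives in different places.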
From f684e293b02a168e3e45d915645142229fdc7561 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 12:29:44 -0500 Subject: added vcpus to instance flavor test model --- nova/tests/fake_network_info.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 6feb1e8f8..d89ddbe9f 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -81,7 +81,8 @@ def fixed_ips(num_networks, num_ips, num_floating_ips=0): flavor = {'id': 0, 'rxtx_cap': 3, - 'memory_mb': 512} + 'memory_mb': 512, + 'vcpus': 2} def floating_ips(fixed_ip_id): for i in xrange(154): -- cgit From 9f0015c727986d17a14f905a1779cc31397a071f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 12:33:08 -0500 Subject: updated instance type fake model --- nova/tests/fake_network_info.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index d89ddbe9f..a8e36c06b 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -80,9 +80,14 @@ def fixed_ips(num_networks, num_ips, num_floating_ips=0): flavor = {'id': 0, - 'rxtx_cap': 3, + 'name': 'fake_flavor', 'memory_mb': 512, - 'vcpus': 2} + 'vcpus': 2, + 'local_gb': 10, + 'flavor_id': 0, + 'swap': 0, + 'rxtx_quota': 0, + 'rxtx_cap': 3} def floating_ips(fixed_ip_id): for i in xrange(154): -- cgit From 49d24725031d7ebfb1a90517bcead02c09a4ebaf Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 12:38:20 -0500 Subject: update libvirt --- nova/virt/libvirt/firewall.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index c2f4f91e8..0db10c7ce 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -338,8 +338,8 @@ class NWFilterFirewall(FirewallDriver): 'nova-allow-dhcp-server'] if FLAGS.use_ipv6: - networks = [network for (network, _m) in network_info if - network['gateway_v6']] + networks = [network for (network, info) in network_info if + info['gateway6']] if networks: instance_secgroup_filter_children.\ -- cgit From fe74d65f7a8ccba26a03c72edbadf4b00e9a5294 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 13:26:33 -0500 Subject: update libvirt tests --- nova/tests/test_libvirt.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 06ad7d13a..02b2f0cfe 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1377,9 +1377,9 @@ class NWFilterTestCase(test.TestCase): _setup_networking(instance_ref['id'], self.test_ip) - def _ensure_all_called(): + def _ensure_all_called(mac): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], - 'fake') + mac.translate(None, ':') secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing', 'no-ip-spoofing', @@ -1396,10 +1396,15 @@ class NWFilterTestCase(test.TestCase): instance = db.instance_get(self.context, inst_id) network_info = _fake_network_info(self.stubs, 1) + # since there is one (network_info) there is one vif + # pass this vif's mac to _ensure_all_called() + # to set the instance_filter properly + mac = network_info[0][1]['mac'] + self.fw.setup_basic_filtering(instance, network_info) self.fw.prepare_instance_filter(instance, network_info) 
self.fw.apply_instance_filter(instance, network_info) - _ensure_all_called() + _ensure_all_called(mac) self.teardown_security_group() db.instance_destroy(context.get_admin_context(), instance_ref['id']) -- cgit From 2fdf2d3fb05f58d1778671830eab111328f624fc Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 13:28:17 -0500 Subject: forgot ) --- nova/tests/test_libvirt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 02b2f0cfe..ded5a99ab 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1379,7 +1379,7 @@ class NWFilterTestCase(test.TestCase): def _ensure_all_called(mac): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], - mac.translate(None, ':') + mac.translate(None, ':')) secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing', 'no-ip-spoofing', -- cgit From 3ae6943354a8c22e65233bca38f38c3521b1cce4 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 13:32:16 -0500 Subject: updated fake values --- nova/tests/fake_network_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index a8e36c06b..079cf539b 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -81,7 +81,7 @@ def fixed_ips(num_networks, num_ips, num_floating_ips=0): flavor = {'id': 0, 'name': 'fake_flavor', - 'memory_mb': 512, + 'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10, 'flavor_id': 0, -- cgit From 698e6052d70bd6da4b6b1d2cfcf096ea576c36a6 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 13:37:25 -0500 Subject: updated fake values --- nova/tests/test_libvirt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index ded5a99ab..6947a70b0 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1120,7 +1120,7 @@ class IptablesFirewallTestCase(test.TestCase): network_info = _fake_network_info(self.stubs, 1) rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) self.assertEquals(len(rulesv4), 2) - self.assertEquals(len(rulesv6), 3) + self.assertEquals(len(rulesv6), 1) def test_filters_for_instance_without_ip_v6(self): self.flags(use_ipv6=False) -- cgit From 02c501391ad9f6bb55771053418f10301e644c8f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 13:44:26 -0500 Subject: updated fake values --- nova/tests/test_libvirt.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 6947a70b0..1b09a3302 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1130,11 +1130,14 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(len(rulesv6), 0) def test_multinic_iptables(self): - ipv4_rules_per_network = 2 - ipv6_rules_per_network = 3 + ipv4_rules_per_addr = 1 + ipv4_addr_per_network = 2 + ipv6_rules_per_addr = 1 + ipv6_addr_per_network = 1 networks_count = 5 instance_ref = self._create_instance_ref() - network_info = _fake_network_info(self.stubs, networks_count) + network_info = _fake_network_info(self.stubs, networks_count, + ipv4_addr_per_network) ipv4_len = len(self.fw.iptables.ipv4['filter'].rules) ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, @@ -1145,9 +1148,9 
@@ class IptablesFirewallTestCase(test.TestCase): ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len self.assertEquals(ipv4_network_rules, - ipv4_rules_per_network * networks_count) + ipv4_rules_per_addr * ipv4_addr_per_network * networks_count) self.assertEquals(ipv6_network_rules, - ipv6_rules_per_network * networks_count) + ipv6_rules_per_addr * ipv4_addr_per_network * networks_count) def test_do_refresh_security_group_rules(self): instance_ref = self._create_instance_ref() -- cgit From adf03e7274636cd58d9c04461896a50f43934e15 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 13:47:54 -0500 Subject: updated fake values --- nova/tests/test_libvirt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 1b09a3302..d8bc9575b 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1150,7 +1150,7 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(ipv4_network_rules, ipv4_rules_per_addr * ipv4_addr_per_network * networks_count) self.assertEquals(ipv6_network_rules, - ipv6_rules_per_addr * ipv4_addr_per_network * networks_count) + ipv6_rules_per_addr * ipv6_addr_per_network * networks_count) def test_do_refresh_security_group_rules(self): instance_ref = self._create_instance_ref() -- cgit From 477510214e80be21a40bff6c053090793653f6a1 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 15:09:39 -0500 Subject: couple of pep8s --- nova/tests/fake_network_info.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py index 079cf539b..2e8cf60ec 100644 --- a/nova/tests/fake_network_info.py +++ b/nova/tests/fake_network_info.py @@ -89,6 +89,7 @@ flavor = {'id': 0, 'rxtx_quota': 0, 'rxtx_cap': 3} + def floating_ips(fixed_ip_id): for i in xrange(154): yield {'id': 0, -- cgit From d0f42b4bf6e60485c7bebe1c60dccce48a0ddcb3 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 16:55:52 -0500 Subject: stubbed some stuff in test_libvirt --- nova/tests/test_libvirt.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index d8bc9575b..58e78daf4 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1063,10 +1063,17 @@ class IptablesFirewallTestCase(test.TestCase): return '', '' print cmd, kwargs + def get_fixed_ips(*args, **kwargs): + ips = [] + for network, info in network_info: + ips.extend(info['ips']) + return [ip['ip'] for ip in ips] + from nova.network import linux_net linux_net.iptables_manager.execute = fake_iptables_execute network_info = _fake_network_info(self.stubs, 1) + self.stubs.Set(self.db, 'instance_get_fixed_addresses', get_fixed_ips) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) -- cgit From 9737731fff7f6d4d19213b453315610bd095a0e6 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 17:00:06 -0500 Subject: updated libvirt test --- nova/tests/test_libvirt.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 58e78daf4..2ff6b3953 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1073,7 +1073,7 @@ class IptablesFirewallTestCase(test.TestCase): linux_net.iptables_manager.execute = fake_iptables_execute network_info = 
_fake_network_info(self.stubs, 1) - self.stubs.Set(self.db, 'instance_get_fixed_addresses', get_fixed_ips) + self.stubs.Set(db, 'instance_get_fixed_addresses', get_fixed_ips) self.fw.prepare_instance_filter(instance_ref, network_info) self.fw.apply_instance_filter(instance_ref, network_info) @@ -1111,10 +1111,11 @@ class IptablesFirewallTestCase(test.TestCase): self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, "ICMP Echo Request acceptance rule wasn't added") - regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport ' - '--dports 80:81 -s %s' % (src_ip,)) - self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, - "TCP port 80/81 acceptance rule wasn't added") + for ip in get_fixed_ips(): + regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport ' + '--dports 80:81 -s %s' % ip) + self.assertTrue(len(filter(regex.match, self.out_rules)) > 0, + "TCP port 80/81 acceptance rule wasn't added") regex = re.compile('-A .* -j ACCEPT -p tcp ' '-m multiport --dports 80:81 -s 192.168.10.0/24') -- cgit From 156430b2d795833245c069f83c435a8a240556fd Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 26 Aug 2011 17:04:30 -0500 Subject: removed self.test ip and _setup_networking from libvirt --- nova/tests/test_libvirt.py | 64 +++------------------------------------------- 1 file changed, 3 insertions(+), 61 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 2ff6b3953..fe5470a6f 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -41,57 +41,14 @@ from nova.tests import fake_network_info libvirt = None FLAGS = flags.FLAGS +_fake_network_info = fake_network_info.fake_get_instance_nw_info +_ipv4_like = fake_network_info.ipv4_like + def _concurrency(wait, done, target): wait.wait() done.send() -_fake_network_info = fake_network_info.fake_get_instance_nw_info -_ipv4_like = fake_network_info.ipv4_like -def _create_network_info(count=1, ipv6=None): - if ipv6 is None: - ipv6 = FLAGS.use_ipv6 - fake = 'fake' - fake_ip = '10.11.12.13' - fake_ip_2 = '0.0.0.1' - fake_ip_3 = '0.0.0.1' - fake_vlan = 100 - fake_bridge_interface = 'eth0' - network = {'bridge': fake, - 'cidr': fake_ip, - 'cidr_v6': fake_ip, - 'gateway_v6': fake, - 'vlan': fake_vlan, - 'bridge_interface': fake_bridge_interface} - mapping = {'mac': fake, - 'dhcp_server': '10.0.0.1', - 'gateway': fake, - 'gateway6': fake, - 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} - if ipv6: - mapping['ip6s'] = [{'ip': fake_ip}, - {'ip': fake_ip_2}, - {'ip': fake_ip_3}] - return [(network, mapping) for x in xrange(0, count)] - - -def _setup_networking(instance_id, ip='1.2.3.4', mac='56:12:12:12:12:12'): - ctxt = context.get_admin_context() - network_ref = db.project_get_networks(ctxt, - 'fake', - associate=True)[0] - vif = {'address': mac, - 'network_id': network_ref['id'], - 'instance_id': instance_id} - vif_ref = db.virtual_interface_create(ctxt, vif) - - fixed_ip = {'address': ip, - 'network_id': network_ref['id'], - 'virtual_interface_id': vif_ref['id']} - db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_id}) - class CacheConcurrencyTestCase(test.TestCase): def setUp(self): @@ -164,7 +121,6 @@ class LibvirtConnTestCase(test.TestCase): self.context = context.get_admin_context() self.flags(instances_path='') self.call_libvirt_dependant_setup = False - self.test_ip = '10.11.12.13' test_instance = {'memory_kb': '1024000', 'basepath': '/some/path', @@ -426,7 +382,6 @@ class LibvirtConnTestCase(test.TestCase): user_context = 
context.RequestContext(self.user_id, self.project_id) instance_ref = db.instance_create(user_context, instance) - _setup_networking(instance_ref['id'], self.test_ip) self.flags(libvirt_type='lxc') conn = connection.LibvirtConnection(True) @@ -458,8 +413,6 @@ class LibvirtConnTestCase(test.TestCase): network_ref = db.project_get_networks(context.get_admin_context(), self.project_id)[0] - _setup_networking(instance_ref['id'], self.test_ip) - type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), (lambda t: t.find('./os/type').text, 'hvm'), @@ -925,7 +878,6 @@ class IptablesFirewallTestCase(test.TestCase): """setup_basic_rules in nwfilter calls this.""" pass self.fake_libvirt_connection = FakeLibvirtConnection() - self.test_ip = '10.11.12.13' self.fw = firewall.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) @@ -989,10 +941,6 @@ class IptablesFirewallTestCase(test.TestCase): def test_static_filters(self): instance_ref = self._create_instance_ref() src_instance_ref = self._create_instance_ref() - src_ip = '10.11.12.14' - src_mac = '56:12:12:12:12:13' - _setup_networking(instance_ref['id'], self.test_ip, src_mac) - _setup_networking(src_instance_ref['id'], src_ip) admin_ctxt = context.get_admin_context() secgroup = db.security_group_create(admin_ctxt, @@ -1184,7 +1132,6 @@ class IptablesFirewallTestCase(test.TestCase): fakefilter.nwfilterLookupByName instance_ref = self._create_instance_ref() - _setup_networking(instance_ref['id'], self.test_ip) network_info = _fake_network_info(self.stubs, 1) self.fw.setup_basic_filtering(instance_ref, network_info) self.fw.prepare_instance_filter(instance_ref, network_info) @@ -1200,7 +1147,6 @@ class IptablesFirewallTestCase(test.TestCase): def test_provider_firewall_rules(self): # setup basic instance data instance_ref = self._create_instance_ref() - _setup_networking(instance_ref['id'], self.test_ip) # FRAGILE: peeks at how the firewall names chains chain_name = 'inst-%s' % instance_ref['id'] @@ -1270,7 +1216,6 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection = Mock() - self.test_ip = '10.11.12.13' self.fw = firewall.NWFilterFirewall( lambda: self.fake_libvirt_connection) @@ -1386,8 +1331,6 @@ class NWFilterTestCase(test.TestCase): instance_ref = self._create_instance() inst_id = instance_ref['id'] - _setup_networking(instance_ref['id'], self.test_ip) - def _ensure_all_called(mac): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], mac.translate(None, ':')) @@ -1444,7 +1387,6 @@ class NWFilterTestCase(test.TestCase): instance = db.instance_get(self.context, inst_id) - _setup_networking(instance_ref['id'], self.test_ip) network_info = _fake_network_info(self.stubs, 1) self.fw.setup_basic_filtering(instance, network_info) self.fw.prepare_instance_filter(instance, network_info) -- cgit From 3a91fa89ea23d22cd34336aa9281a439579d4ce0 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Fri, 26 Aug 2011 15:48:53 -0700 Subject: Minor changes based on recent quantum changes --- nova/network/quantum/client.py | 3 ++- nova/network/quantum/manager.py | 3 ++- nova/network/quantum/quantum_connection.py | 6 +++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index b57294c55..613369c7d 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -169,7 +169,8 @@ class Client(object): httplib.CREATED, httplib.ACCEPTED, httplib.NO_CONTENT): - return self.deserialize(data, 
status_code) + if data is not None and len(data): + return self.deserialize(data, status_code) else: raise Exception("Server returned error: %s" % res.read()) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index e49d0cfdc..79bb65939 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -119,7 +119,8 @@ class QuantumManager(manager.FlatManager): def get_instance_nw_info(self, context, instance_id, instance_type_id, host): network_info = [] - project_id = context.project_id + instance = db.instance_get(context, instance_id) + project_id = instance.project_id admin_context = context.elevated() vifs = db.virtual_interface_get_by_instance(admin_context, diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index 7a96d23fd..c0eaad1fd 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -67,9 +67,9 @@ class QuantumClientConnection: (interface_id, net_id, tenant_id)) port_data = {'port': {'port-state': 'ACTIVE'}} resdict = self.client.create_port(net_id, port_data, tenant=tenant_id) - port_id = resdict["ports"]["port"]["id"] + port_id = resdict["port"]["id"] - attach_data = {'port': {'attachment-id': interface_id}} + attach_data = {'attachment': {'id': interface_id}} self.client.attach_resource(net_id, port_id, attach_data, tenant=tenant_id) @@ -92,6 +92,6 @@ class QuantumClientConnection: port_id = p["id"] port_get_resdict = self.client.show_port_attachment(net_id, port_id, tenant=tenant_id) - if attachment_id == port_get_resdict["attachment"]: + if attachment_id == port_get_resdict["attachment"]["id"]: return (net_id, port_id) return (None, None) -- cgit From e39ec75169ff3b7ac29212ca315ad213997a8cbc Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sat, 27 Aug 2011 04:32:20 -0400 Subject: Updated VersionsXMLSerializer and corresponding tests to use lxml. --- nova/api/openstack/limits.py | 1 - nova/api/openstack/versions.py | 65 ++++++----- nova/api/openstack/views/versions.py | 2 +- nova/tests/api/openstack/test_versions.py | 176 ++++++++++++------------------ 4 files changed, 109 insertions(+), 135 deletions(-) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 5ee9a05b0..2896ac396 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -26,7 +26,6 @@ import re import time import urllib import webob.exc -from xml.dom import minidom from collections import defaultdict diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index e2f892fb6..16a4e8bfd 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -16,12 +16,14 @@ # under the License. 
from datetime import datetime +from lxml import etree import webob import webob.dec from xml.dom import minidom import nova.api.openstack.views.versions from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil VERSIONS = { @@ -159,22 +161,6 @@ class VersionsRequestDeserializer(wsgi.RequestDeserializer): class VersionsXMLSerializer(wsgi.XMLDictSerializer): - #TODO(wwolf): this is temporary until we get rid of toprettyxml - # in the base class (XMLDictSerializer), which I plan to do in - # another branch - def to_xml_string(self, node, has_atom=False): - self._add_xmlns(node, has_atom) - return node.toxml(encoding='UTF-8') - - def _versions_to_xml(self, versions, name="versions", xmlns=None): - root = self._xml_doc.createElement(name) - root.setAttribute("xmlns", wsgi.XMLNS_V11) - root.setAttribute("xmlns:atom", wsgi.XMLNS_ATOM) - - for version in versions: - root.appendChild(self._create_version_node(version)) - - return root def _create_media_types(self, media_types): base = self._xml_doc.createElement('media-types') @@ -209,24 +195,45 @@ class VersionsXMLSerializer(wsgi.XMLDictSerializer): return version_node - def index(self, data): - self._xml_doc = minidom.Document() - node = self._versions_to_xml(data['versions']) + def _populate_version(self, version_node, version): + version_node.set('id', version['id']) + version_node.set('status', version['status']) + if 'updated' in version: + version_node.set('updated', version['updated']) + if 'media-types' in version: + media_types = etree.SubElement(version_node, 'media-types') + for mtype in version['media-types']: + elem = etree.SubElement(media_types, 'media-type') + elem.set('base', mtype['base']) + elem.set('type', mtype['type']) + for link in version.get('links', []): + elem = etree.SubElement(version_node, + '{%s}link' % xmlutil.XMLNS_ATOM) + elem.set('rel', link['rel']) + elem.set('href', link['href']) + if 'type' in link: + elem.set('type', link['type']) + + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} - return self.to_xml_string(node) + def index(self, data): + root = etree.Element('versions', nsmap=self.NSMAP) + for version in data['versions']: + version_elem = etree.SubElement(root, 'version') + self._populate_version(version_elem, version) + return etree.tostring(root, encoding='UTF-8') def show(self, data): - self._xml_doc = minidom.Document() - node = self._create_version_node(data['version'], True) - - return self.to_xml_string(node) + root = etree.Element('version', nsmap=self.NSMAP) + self._populate_version(root, data['version']) + return etree.tostring(root, encoding='UTF-8') def multi(self, data): - self._xml_doc = minidom.Document() - node = self._versions_to_xml(data['choices'], 'choices', - xmlns=wsgi.XMLNS_V11) - - return self.to_xml_string(node) + root = etree.Element('choices', nsmap=self.NSMAP) + for version in data['choices']: + version_elem = etree.SubElement(root, 'version') + self._populate_version(version_elem, version) + return etree.tostring(root, encoding='UTF-8') class VersionsAtomSerializer(wsgi.XMLDictSerializer): diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py index 03da80818..1ac398706 100644 --- a/nova/api/openstack/views/versions.py +++ b/nova/api/openstack/views/versions.py @@ -52,7 +52,7 @@ class ViewBuilder(object): def build_versions(self, versions): version_objs = [] - for version in versions: + for version in sorted(versions.keys()): version = versions[version] version_objs.append({ "id": version['id'], diff --git 
a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 1269f13c9..3b4396b1a 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -18,8 +18,7 @@ import json import stubout import webob -import xml.etree.ElementTree - +from lxml import etree from nova import context from nova import test @@ -28,6 +27,10 @@ from nova.api.openstack import versions from nova.api.openstack import views from nova.api.openstack import wsgi +NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/compute/api/v1.1' +} VERSIONS = { "v1.0": { "id": "v1.0", @@ -113,23 +116,23 @@ class VersionsTest(test.TestCase): versions = json.loads(res.body)["versions"] expected = [ { - "id": "v1.1", - "status": "CURRENT", + "id": "v1.0", + "status": "DEPRECATED", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.1/", + "href": "http://localhost/v1.0/", }], }, { - "id": "v1.0", - "status": "DEPRECATED", + "id": "v1.1", + "status": "CURRENT", "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", - "href": "http://localhost/v1.0/", + "href": "http://localhost/v1.1/", }], }, ] @@ -233,48 +236,19 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - root = xml.etree.ElementTree.XML(res.body) - self.assertEqual(root.tag.split('}')[1], "version") - self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - children = list(root) - media_types = children[0] - media_type_nodes = list(media_types) - links = (children[1], children[2], children[3]) - - self.assertEqual(media_types.tag.split('}')[1], 'media-types') - for media_node in media_type_nodes: - self.assertEqual(media_node.tag.split('}')[1], 'media-type') - - expected = """ - - - - - - - - - - - - - """.replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 - - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + version = etree.XML(res.body) + expected = VERSIONS['v1.0'] + self.assertTrue(version.xpath('/ns:version', namespaces=NS)) + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self._compare_media_types(media_types, expected['media-types']) + for key in ['id', 'status', 'updated']: + self.assertEqual(version.get(key), expected[key]) + links = version.xpath('atom:link', namespaces=NS) + self._compare_links(links, + [{'rel': 'self', 'href': 'http://localhost/v1.0/'}] + + expected['links']) def test_get_version_1_1_detail_xml(self): req = webob.Request.blank('/v1.1/') @@ -282,35 +256,19 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - expected = """ - - - - - - - - - - - - - """.replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + version = etree.XML(res.body) + expected = VERSIONS['v1.1'] + self.assertTrue(version.xpath('/ns:version', namespaces=NS)) + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self._compare_media_types(media_types, expected['media-types']) + for key in ['id', 'status', 'updated']: + self.assertEqual(version.get(key), expected[key]) + links = version.xpath('atom:link', namespaces=NS) + self._compare_links(links, + [{'rel': 'self', 'href': 
'http://localhost/v1.1/'}] + + expected['links']) def test_get_version_list_xml(self): req = webob.Request.blank('/') @@ -319,21 +277,19 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - expected = """ - - - - - - - - """.replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, - wsgi.XMLNS_ATOM) - - actual = res.body.replace(" ", "").replace("\n", "") + root = etree.XML(res.body) + self.assertTrue(root.xpath('/ns:versions', namespaces=NS)) + versions = root.xpath('ns:version', namespaces=NS) + self.assertEqual(len(versions), 2) - self.assertEqual(expected, actual) + for (i, v) in ((0, 'v1.0'), (1, 'v1.1')): + version = versions[i] + expected = VERSIONS[v] + for key in ['id', 'status', 'updated']: + self.assertEqual(version.get(key), expected[key]) + (link,) = version.xpath('atom:link', namespaces=NS) + self._compare_links(link, + [{'rel': 'self', 'href': 'http://localhost/%s/' % v}]) def test_get_version_1_0_detail_atom(self): req = webob.Request.blank('/v1.0/') @@ -427,21 +383,21 @@ class VersionsTest(test.TestCase): - http://localhost/v1.1/ - Version v1.1 + http://localhost/v1.0/ + Version v1.0 2011-01-21T11:33:21Z - + - Version v1.1 CURRENT (2011-01-21T11:33:21Z) + Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) - http://localhost/v1.0/ - Version v1.0 + http://localhost/v1.1/ + Version v1.1 2011-01-21T11:33:21Z - + - Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) + Version v1.1 CURRENT (2011-01-21T11:33:21Z) @@ -598,6 +554,18 @@ class VersionsTest(test.TestCase): self.assertDictMatch(expected, json.loads(res.body)) + def _compare_media_types(self, actual, expected): + for elem, data in zip(actual, expected): + self.assertEqual(elem.get('base'), data['base']) + self.assertEqual(elem.get('type'), data['type']) + + def _compare_links(self, actual, expected): + for elem, data in zip(actual, expected): + self.assertEqual(elem.get('rel'), data['rel']) + self.assertEqual(elem.get('href'), data['href']) + if 'type' in data: + self.assertEqual(elem.get('type'), data['type']) + class VersionsViewBuilderTests(test.TestCase): def test_view_builder(self): @@ -665,7 +633,7 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsXMLSerializer() response = serializer.index(versions_data) - root = xml.etree.ElementTree.XML(response) + root = etree.XML(response) self.assertEqual(root.tag.split('}')[1], "versions") self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) version = list(root)[0] @@ -703,7 +671,7 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsXMLSerializer() response = serializer.multi(versions_data) - root = xml.etree.ElementTree.XML(response) + root = etree.XML(response) self.assertEqual(root.tag.split('}')[1], "choices") self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) version = list(root)[0] @@ -770,7 +738,7 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsXMLSerializer() response = serializer.show(version_data) - root = xml.etree.ElementTree.XML(response) + root = etree.XML(response) self.assertEqual(root.tag.split('}')[1], "version") self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) @@ -812,7 +780,7 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsAtomSerializer() response = serializer.index(versions_data) - root = xml.etree.ElementTree.XML(response) + root = etree.XML(response) self.assertEqual(root.tag.split('}')[1], "feed") 
self.assertEqual(root.tag.split('}')[0].strip('{'), "http://www.w3.org/2005/Atom") @@ -905,7 +873,7 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsAtomSerializer() response = serializer.show(versions_data) - root = xml.etree.ElementTree.XML(response) + root = etree.XML(response) self.assertEqual(root.tag.split('}')[1], "feed") self.assertEqual(root.tag.split('}')[0].strip('{'), "http://www.w3.org/2005/Atom") -- cgit From d5b489383710605b10067550417a4e62a5f4f3e1 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Sun, 28 Aug 2011 11:37:19 -0700 Subject: use 'uuid' field in networks table rather than 'bridge'. Specify project_id when creating instance in unit test --- bin/nova-manage | 16 +++++++++++----- nova/db/api.py | 5 +++++ nova/db/sqlalchemy/api.py | 13 +++++++++++++ nova/exception.py | 4 ++++ nova/network/quantum/manager.py | 5 +++-- nova/network/quantum/nova_ipam_lib.py | 30 +++++++++++++++++------------- nova/tests/test_quantum.py | 6 ++++-- 7 files changed, 57 insertions(+), 22 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 4628e93b2..0c2cee3ce 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -681,6 +681,8 @@ class NetworkCommands(object): help='Multi host') @args('--dns1', dest="dns1", metavar="", help='First DNS') @args('--dns2', dest="dns2", metavar="", help='Second DNS') + @args('--uuid', dest="net_uuid", metavar="", + help='Network UUID') @args('--project_id', dest="project_id", metavar="", help='Project id') @args('--priority', dest="priority", metavar="", @@ -689,7 +691,7 @@ class NetworkCommands(object): network_size=None, multi_host=None, vlan_start=None, vpn_start=None, fixed_range_v6=None, gateway_v6=None, bridge=None, bridge_interface=None, dns1=None, dns2=None, - project_id=None, priority=None): + project_id=None, priority=None, uuid=None): """Creates fixed ips for host by range""" # check for certain required inputs @@ -768,7 +770,8 @@ class NetworkCommands(object): dns1=dns1, dns2=dns2, project_id=project_id, - priority=priority) + priority=priority, + uuid=uuid) def list(self): """List all created networks""" @@ -797,13 +800,13 @@ class NetworkCommands(object): def quantum_list(self): """List all created networks with Quantum-relevant fields""" _fmt = "%-32s\t%-10s\t%-10s\t%s , %s" - print _fmt % ( _('bridge / quantum-id'), + print _fmt % ( _('uuid'), _('project'), _('priority'), _('cidr_v4'), _('cidr_v6')) for network in db.network_get_all(context.get_admin_context()): - print _fmt % (network.bridge, + print _fmt % (network.uuid, network.project_id, network.priority, network.cidr, @@ -811,12 +814,15 @@ class NetworkCommands(object): @args('--network', dest="fixed_range", metavar='', help='Network to delete') + @args('--uuid', dest="uuid", metavar='', + help='UUID of network to delete') def delete(self, fixed_range): """Deletes a network""" # delete the network net_manager = utils.import_object(FLAGS.network_manager) - net_manager.delete_network(context.get_admin_context(), fixed_range) + net_manager.delete_network(context.get_admin_context(), fixed_range, + uuid=None) diff --git a/nova/db/api.py b/nova/db/api.py index 9ff3a1c74..17ef0bd0b 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -708,6 +708,11 @@ def network_get_by_bridge(context, bridge): return IMPL.network_get_by_bridge(context, bridge) +def network_get_by_uuid(context, uuid): + """Get a network by uuid or raise if it does not exist.""" + return IMPL.network_get_by_uuid(context, uuid) + + def network_get_by_cidr(context, cidr): """Get a network 
by cidr or raise if it does not exist""" return IMPL.network_get_by_cidr(context, cidr) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d96b951a1..80ce76e8f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1846,6 +1846,19 @@ def network_get_by_bridge(context, bridge): return result +@require_admin_context +def network_get_by_uuid(context, uuid): + session = get_session() + result = session.query(models.Network).\ + filter_by(uuid=uuid).\ + filter_by(deleted=False).\ + first() + + if not result: + raise exception.NetworkNotFoundForUUID(uuid=uuid) + return result + + @require_admin_context def network_get_by_cidr(context, cidr): session = get_session() diff --git a/nova/exception.py b/nova/exception.py index 66740019b..8d6e84d74 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -411,6 +411,10 @@ class NetworkNotFoundForBridge(NetworkNotFound): message = _("Network could not be found for bridge %(bridge)s") +class NetworkNotFoundForUUID(NetworkNotFound): + message = _("Network could not be found for uuid %(uuid)s") + + class NetworkNotFoundForCidr(NetworkNotFound): message = _("Network could not be found with cidr %(cidr)s.") diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 79bb65939..932bdd82f 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -51,13 +51,14 @@ class QuantumManager(manager.FlatManager): def create_networks(self, context, label, cidr, multi_host, num_networks, network_size, cidr_v6, gateway_v6, bridge, - bridge_interface, dns1=None, dns2=None, **kwargs): + bridge_interface, dns1=None, dns2=None, uuid=None, + **kwargs): if num_networks != 1: raise Exception("QuantumManager requires that only one" " network is created per call") q_tenant_id = kwargs["project_id"] or \ FLAGS.quantum_default_tenant_id - quantum_net_id = bridge + quantum_net_id = uuid if quantum_net_id: if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): raise Exception("Unable to find existing quantum " \ diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index b72edeeff..5bca9d024 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -47,15 +47,19 @@ class QuantumNovaIPAMLib: dns1=None, dns2=None): admin_context = context.elevated() subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) - manager.FlatManager.create_networks(self.net_manager, + networks = manager.FlatManager.create_networks(self.net_manager, admin_context, label, cidr, False, 1, subnet_size, cidr_v6, gateway_v6, quantum_net_id, None, dns1, dns2) - # now grab the network and update project_id + priority - network = db.network_get_by_bridge(admin_context, quantum_net_id) + if len(networks) != 1: + raise Exception("Error creating network entry") + + # now grab the network and update uuid, project_id, priority + network = networks[0] net = {"project_id": tenant_id, - "priority": priority} + "priority": priority, + "uuid": quantum_net_id} db.network_update(admin_context, network['id'], net) def get_network_id_by_cidr(self, context, cidr, project_id): @@ -64,11 +68,11 @@ class QuantumNovaIPAMLib: if not network: raise Exception("No network with fixed_range = %s" \ % fixed_range) - return network['bridge'] + return network['uuid'] def delete_subnets_by_net_id(self, context, net_id, project_id): admin_context = context.elevated() - network = db.network_get_by_bridge(admin_context, net_id) + network = 
db.network_get_by_uuid(admin_context, net_id) if not network: raise Exception("No network with net_id = %s" % net_id) manager.FlatManager.delete_network(self.net_manager, @@ -85,14 +89,14 @@ class QuantumNovaIPAMLib: id_priority_map = {} net_list = [] for n in networks: - net_id = n['bridge'] + net_id = n['uuid'] net_list.append((net_id, n["project_id"])) id_priority_map[net_id] = n['priority'] return sorted(net_list, key=lambda x: id_priority_map[x[0]]) def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): admin_context = context.elevated() - network = db.network_get_by_bridge(admin_context, quantum_net_id) + network = db.network_get_by_uuid(admin_context, quantum_net_id) if network['cidr']: address = db.fixed_ip_associate_pool(admin_context, network['id'], @@ -102,9 +106,9 @@ class QuantumNovaIPAMLib: db.fixed_ip_update(admin_context, address, values) def get_subnets_by_net_id(self, context, tenant_id, net_id): - n = db.network_get_by_bridge(context.elevated(), net_id) + n = db.network_get_by_uuid(context.elevated(), net_id) subnet_data_v4 = { - 'network_id': n['bridge'], + 'network_id': n['uuid'], 'cidr': n['cidr'], 'gateway': n['gateway'], 'broadcast': n['broadcast'], @@ -112,7 +116,7 @@ class QuantumNovaIPAMLib: 'dns1': n['dns1'], 'dns2': n['dns2']} subnet_data_v6 = { - 'network_id': n['bridge'], + 'network_id': n['uuid'], 'cidr': n['cidr_v6'], 'gateway': n['gateway_v6'], 'broadcast': None, @@ -129,7 +133,7 @@ class QuantumNovaIPAMLib: def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): admin_context = context.elevated() - network = db.network_get_by_bridge(admin_context, net_id) + network = db.network_get_by_uuid(admin_context, net_id) vif_rec = db.virtual_interface_get_by_uuid(context, vif_id) if network['cidr_v6']: ip = ipv6.to_global(network['cidr_v6'], @@ -140,7 +144,7 @@ class QuantumNovaIPAMLib: def verify_subnet_exists(self, context, tenant_id, quantum_net_id): admin_context = context.elevated() - network = db.network_get_by_bridge(admin_context, quantum_net_id) + network = db.network_get_by_uuid(admin_context, quantum_net_id) def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref): try: diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 8ef22ac7e..ae9900b67 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -129,7 +129,8 @@ class QuantumTestCaseBase(object): project_id = "fake_project1" ctx = context.RequestContext('user1', project_id) - instance_ref = db.api.instance_create(ctx, {}) + instance_ref = db.api.instance_create(ctx, + {"project_id": project_id}) nw_info = self.net_man.allocate_for_instance(ctx, instance_id=instance_ref['id'], host="", instance_type_id=instance_ref['instance_type_id'], @@ -173,7 +174,8 @@ class QuantumTestCaseBase(object): self.net_man.validate_networks(ctx, requested_networks) - instance_ref = db.api.instance_create(ctx, {}) + instance_ref = db.api.instance_create(ctx, + {"project_id": project_id}) nw_info = self.net_man.allocate_for_instance(ctx, instance_id=instance_ref['id'], host="", instance_type_id=instance_ref['instance_type_id'], -- cgit From 1bfc7ce80c3936a19434dfc45c44f8a1acfd65ed Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Sun, 28 Aug 2011 12:53:32 -0700 Subject: fix issue with setting 'Active' caused by Quantum API changes. 
Other misc fixes --- nova/network/quantum/quantum_connection.py | 6 +++--- nova/virt/libvirt/vif.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index c0eaad1fd..a13867af2 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -65,7 +65,7 @@ class QuantumClientConnection: def create_and_attach_port(self, tenant_id, net_id, interface_id): LOG.debug("Connecting interface %s to net %s for %s" % \ (interface_id, net_id, tenant_id)) - port_data = {'port': {'port-state': 'ACTIVE'}} + port_data = {'port': {'state': 'ACTIVE'}} resdict = self.client.create_port(net_id, port_data, tenant=tenant_id) port_id = resdict["port"]["id"] @@ -74,13 +74,13 @@ class QuantumClientConnection: tenant=tenant_id) def detach_and_delete_port(self, tenant_id, net_id, port_id): - LOG.debug("Deleteing port %s on net %s for %s" % \ + LOG.debug("Deleting port %s on net %s for %s" % \ (port_id, net_id, tenant_id)) self.client.detach_resource(net_id, port_id, tenant=tenant_id) self.client.delete_port(net_id, port_id, tenant=tenant_id) - # FIXME: this will be inefficient until API implements querying + # FIXME: (danwent) this will be inefficient until API implements querying def get_port_by_attachment(self, tenant_id, attachment_id): net_list_resdict = self.client.list_networks(tenant=tenant_id) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 0b7438011..077c32474 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -101,7 +101,7 @@ class LibvirtOpenVswitchDriver(VIFDriver): """VIF driver for Open vSwitch.""" def get_dev_name(_self, iface_id): - return "tap-" + iface_id[0:15] + return "tap" + iface_id[0:11] def plug(self, instance, network, mapping): iface_id = mapping['vif_uuid'] -- cgit From 56891283f117997042363aee2e3ce00a5a12d9e0 Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Sun, 28 Aug 2011 15:39:27 -0700 Subject: update melange ipam lib to use network uuid, not bridge --- nova/network/quantum/melange_ipam_lib.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index 5b58520c2..cfdcde045 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -52,7 +52,7 @@ class QuantumMelangeIPAMLib: # create a entry in the network table just to store # the priority order for this network - net = {"bridge": quantum_net_id, + net = {"uuid": quantum_net_id, "project_id": project_id, "priority": priority} network = self.db.network_create_safe(context, net) @@ -79,7 +79,7 @@ class QuantumMelangeIPAMLib: if b['network_id'] == net_id: self.m_conn.delete_block(b['id'], tenant_id) - network = db.network_get_by_bridge(admin_context, net_id) + network = db.network_get_by_uuid(admin_context, net_id) if network is not None: db.network_delete_safe(context, network['id']) @@ -101,9 +101,8 @@ class QuantumMelangeIPAMLib: id_proj_map[b['network_id']] = tenant_id id_priority_map = {} - network = db.network_get_by_bridge(admin_context, net_id) for net_id, project_id in id_project_map.item(): - network = db.network_get_by_bridge(admin_context, net_id) + network = db.network_get_by_uuid(admin_context, net_id) if network is None: del id_proj_map[net_id] else: @@ -111,8 +110,11 @@ class QuantumMelangeIPAMLib: return sorted(id_priority_map.items(), key=lambda x: 
id_priority_map[x[0]]) - # FIXME: there must be a more efficient way to do this, - # talk to the melange folks + # FIXME: (danwent) Melange actually returns the subnet info + # when we query for a particular interface. we may want to + # reworks the ipam_manager python API to let us take advantage of + # this, as right now we have to get all blocks and cycle through + # them. def get_subnets_by_net_id(self, context, project_id, net_id): subnet_v4 = None subnet_v6 = None -- cgit From 431cd5d17780aa7ea9d03b028a78ec4e20b22440 Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Sun, 28 Aug 2011 17:37:07 -0700 Subject: always set network_id in virtual_interfaces table, otherwise API commands that show IP addresses get confused --- nova/network/quantum/manager.py | 24 +++++++++++++++++++----- nova/network/quantum/melange_ipam_lib.py | 11 ++++++++--- nova/network/quantum/nova_ipam_lib.py | 2 +- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 932bdd82f..fb13a8496 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -105,14 +105,28 @@ class QuantumManager(manager.FlatManager): project_id) # Create a port via quantum and attach the vif - for (net_id, project_id) in net_proj_pairs: + for (quantum_net_id, project_id) in net_proj_pairs: + + # FIXME: (danwent). We'd like to have the manager be completely + # decoupled from the nova networks table. + # However, other parts of nova sometimes go behind + # our back and access network data directly from the DB. So + # for now, the quantum manager knows that there is a nova + # networks DB table and accesses it here. + # updating the virtual_interfaces table to use UUIDs would + # be one solution, but this would require significant work + # elsewhere. + network_ref = db.network_get_by_uuid(context, quantum_net_id) + vif_rec = manager.FlatManager.add_virtual_interface(self, - context, instance_id, None) + context, instance_id, network_ref['id']) + # talk to Quantum API to create and attach port. q_tenant_id = project_id or FLAGS.quantum_default_tenant_id - self.q_conn.create_and_attach_port(q_tenant_id, net_id, + self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id, vif_rec['uuid']) - self.ipam.allocate_fixed_ip(context, project_id, net_id, vif_rec) + self.ipam.allocate_fixed_ip(context, project_id, quantum_net_id, + vif_rec) return self.get_instance_nw_info(context, instance_id, instance_type_id, host) @@ -173,7 +187,7 @@ class QuantumManager(manager.FlatManager): if v6_subnet['cidr']: network_dict['cidr_v6'] = v6_subnet['cidr'] info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips] - # TODO(tr3buchet): handle ip6 routes here as well + if v6_subnet['gateway']: info['gateway6'] = v6_subnet['gateway'] diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index cfdcde045..e2e09f139 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -50,11 +50,16 @@ class QuantumMelangeIPAMLib: project_id=tenant_id, dns1=dns1, dns2=dns2) - # create a entry in the network table just to store - # the priority order for this network + # create a entry in the network table, even though + # most data is stored in melange. This is used to + # store data not kept by melange (e.g., priority) + # and to 'fake' other parts of nova (e.g., the API) + # until we get get all accesses to be via the + # network manager API. 
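# A minimal illustrative sketch (helper name assumed, not part of the patch)
# of how rows shaped like the 'net' dict below can later be ordered by the
# 'priority' value they exist to store:
def _order_nets_by_priority(network_rows):
    # network_rows: list of dicts carrying 'uuid', 'project_id' and 'priority'
    priorities = dict((n['uuid'], n['priority']) for n in network_rows)
    pairs = [(n['uuid'], n['project_id']) for n in network_rows]
    return sorted(pairs, key=lambda pair: priorities[pair[0]])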
net = {"uuid": quantum_net_id, "project_id": project_id, - "priority": priority} + "priority": priority, + "label": label} network = self.db.network_create_safe(context, net) def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref): diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index 5bca9d024..6e7e5d244 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -55,7 +55,7 @@ class QuantumNovaIPAMLib: if len(networks) != 1: raise Exception("Error creating network entry") - # now grab the network and update uuid, project_id, priority + # now grab the network and update additional fields network = networks[0] net = {"project_id": tenant_id, "priority": priority, -- cgit From 822d92ed1f6a5f2f0951c5e43be6ce0c8fb75e65 Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Sun, 28 Aug 2011 19:12:43 -0700 Subject: remove fake IPAM lib, since qmanager must now access nova DB directly --- nova/network/quantum/fake.py | 124 ------------------------------------------- nova/tests/test_quantum.py | 8 --- 2 files changed, 132 deletions(-) diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py index 3f6996e63..00cdd8e08 100644 --- a/nova/network/quantum/fake.py +++ b/nova/network/quantum/fake.py @@ -91,127 +91,3 @@ class FakeQuantumClientConnection: return (net_id, port_id) return (None, None) - - -def get_ipam_lib(net_man): - return FakeQuantumIPAMLib() - - -class FakeQuantumIPAMLib(): - - def __init__(self): - self.subnets = {} - - def create_subnet(self, context, label, tenant_id, quantum_net_id, - priority, cidr=None, gateway_v6=None, - cidr_v6=None, dns1=None, dns2=None): - if int(cidr.split("/")[1]) != 24: - raise Exception("fake ipam_lib only supports /24s") - v4_ips = [] - net_start = cidr[0:cidr.rfind(".") + 1] - subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) - for i in xrange(2, subnet_size - 1): - v4_ips.append({"ip": net_start + str(i), - "allocated": False, - "virtual_interface_id": None, - "instance_id": None}) - self.subnets[quantum_net_id] = {\ - "label": label, - "priority": priority, - "gateway": net_start + "1", - "netmask": "255.255.255.0", - "broadcast": net_start + "255", - "cidr": cidr, - "gateway_v6": gateway_v6, - "cidr_v6": cidr_v6, - "dns1": dns1, - "dns2": dns2, - "project_id": tenant_id, - "v4_ips": v4_ips} - - def get_network_id_by_cidr(self, context, cidr, project_id): - for net_id, s in self.subnets.items(): - if s['cidr'] == cidr or s['cidr_v6'] == cidr: - return net_id - return None - - def delete_subnets_by_net_id(self, context, net_id, project_id): - self.verify_subnet_exists(context, project_id, net_id) - del self.subnets[net_id] - - def get_project_and_global_net_ids(self, context, project_id): - net_list = [] - id_priority_map = {} - for nid, s in self.subnets.items(): - if s['project_id'] == project_id or \ - s['project_id'] == None: - net_list.append((nid, s['project_id'])) - id_priority_map[nid] = s['priority'] - return sorted(net_list, key=lambda x: id_priority_map[x[0]]) - - def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): - subnet = self.subnets[quantum_net_id] - for i in xrange(0, len(subnet['v4_ips'])): - ip = subnet['v4_ips'][i] - if not ip['allocated']: - subnet['v4_ips'][i] = {'ip': ip['ip'], - 'allocated': True, - 'virtual_interface_id': vif_rec['uuid'], - 'instance_id': vif_rec['instance_id']} - return - raise Exception("Unable to find available IP for net '%s'" %\ - quantum_net_id) - - def 
get_subnets_by_net_id(self, context, tenant_id, net_id): - self.verify_subnet_exists(context, tenant_id, net_id) - - subnet_data = self.subnets[net_id] - subnet_data_v4 = { - 'network_id': net_id, - 'cidr': subnet_data['cidr'], - 'gateway': subnet_data['gateway'], - 'broadcast': subnet_data['broadcast'], - 'netmask': subnet_data['netmask'], - 'dns1': subnet_data['dns1'], - 'dns2': subnet_data['dns2']} - subnet_data_v6 = { - 'network_id': net_id, - 'cidr': subnet_data['cidr_v6'], - 'gateway': subnet_data['gateway_v6'], - 'broadcast': None, - 'netmask': None, - 'dns1': None, - 'dns2': None} - return (subnet_data_v4, subnet_data_v6) - - def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): - self.verify_subnet_exists(context, project_id, net_id) - - subnet_data = self.subnets[net_id] - if subnet_data['cidr_v6']: - ip = ipv6.to_global(subnet_data['cidr_v6'], - "ca:fe:de:ad:be:ef", - project_id) - return [ip] - return [] - - def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): - self.verify_subnet_exists(context, project_id, net_id) - - subnet_data = self.subnets[net_id] - for ip in subnet_data['v4_ips']: - if ip['virtual_interface_id'] == vif_id: - return [ip['ip']] - return [] - - def verify_subnet_exists(self, context, tenant_id, quantum_net_id): - if quantum_net_id not in self.subnets: - raise exception.NetworkNotFound(network_id=quantum_net_id) - - def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref): - s = self.subnets[net_id] - for ip in s['v4_ips']: - if ip['virtual_interface_id'] == vif_ref['id']: - ip['allocated'] = False - ip['instance_id'] = None - ip['virtual_interface_id'] = None diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index ae9900b67..e7a8a01fa 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -224,14 +224,6 @@ class QuantumTestCaseBase(object): self.net_man.validate_networks, ctx, [("", None)]) -class QuantumFakeIPAMTestCase(QuantumTestCaseBase, test.TestCase): - - def setUp(self): - super(QuantumFakeIPAMTestCase, self).setUp() - self.net_man = quantum_manager.QuantumManager( \ - ipam_lib="nova.network.quantum.fake") - - class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase): def setUp(self): -- cgit From 716303049eaee59841ca4679d73ecb4e5be52cfd Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Sun, 28 Aug 2011 19:13:02 -0700 Subject: add doc-strings for all major modules --- nova/network/quantum/manager.py | 97 +++++++++++++++++++++++++----- nova/network/quantum/melange_ipam_lib.py | 81 ++++++++++++++++++------- nova/network/quantum/nova_ipam_lib.py | 85 ++++++++++++++++++-------- nova/network/quantum/quantum_connection.py | 28 ++++++++- 4 files changed, 226 insertions(+), 65 deletions(-) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index fb13a8496..a002a3d7b 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -35,8 +35,27 @@ flags.DEFINE_string('quantum_ipam_lib', class QuantumManager(manager.FlatManager): + """ NetworkManager class that communicates with a Quantum service + via a web services API to provision VM network connectivity. + + For IP Address management, QuantumManager can be configured to + use either Nova's local DB or the Melange IPAM service. 
+ + Currently, the QuantumManager does NOT support any of the 'gateway' + functionality implemented by the Nova VlanManager, including: + * floating IPs + * DHCP + * NAT gateway + + Support for these capabilities are targted for future releases. + """ def __init__(self, ipam_lib=None, *args, **kwargs): + """ Initialize two key libraries, the connection to a + Quantum service, and the library for implementing IPAM. + + Calls inherited FlatManager constructor. + """ if FLAGS.fake_network: self.q_conn = fake.FakeQuantumClientConnection() @@ -53,6 +72,17 @@ class QuantumManager(manager.FlatManager): network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, uuid=None, **kwargs): + """ Unlike other NetworkManagers, with QuantumManager, each + create_networks calls should create only a single network. + + Two scenarios exist: + - no 'uuid' is specified, in which case we contact + Quantum and create a new network. + - an existing 'uuid' is specified, corresponding to + a Quantum network created out of band. + + In both cases, we initialize a subnet using the IPAM lib. + """ if num_networks != 1: raise Exception("QuantumManager requires that only one" " network is created per call") @@ -74,27 +104,46 @@ class QuantumManager(manager.FlatManager): priority, cidr, gateway_v6, cidr_v6, dns1, dns2) def delete_network(self, context, fixed_range): + """ Lookup network by IPv4 cidr, delete both the IPAM + subnet and the corresponding Quantum network. + """ project_id = context.project_id quantum_net_id = self.ipam.get_network_id_by_cidr( context, fixed_range, project_id) self.ipam.delete_subnets_by_net_id(context, quantum_net_id, project_id) - try: - q_tenant_id = project_id or FLAGS.quantum_default_tenant_id - self.q_conn.delete_network(q_tenant_id, quantum_net_id) - except Exception, e: - raise Exception("Unable to delete Quantum Network with " - "fixed_range = %s (ports still in use?)." % fixed_range) + q_tenant_id = project_id or FLAGS.quantum_default_tenant_id + self.q_conn.delete_network(q_tenant_id, quantum_net_id) def allocate_for_instance(self, context, **kwargs): + """ Called by compute when it is creating a new VM. + + There are three key tasks: + - Determine the number and order of vNICs to create + - Allocate IP addresses + - Create ports on a Quantum network and attach vNICs. + + We support two approaches to determining vNICs: + - By default, a VM gets a vNIC for any network belonging + to the VM's project, and a vNIC for any "global" network + that has a NULL project_id. vNIC order is determined + by the network's 'priority' field. + - If the 'os-create-server-ext' was used to create the VM, + only the networks in 'requested_networks' are used to + create vNICs, and the vNIC order is determiend by the + order in the requested_networks array. + + For each vNIC, use the FlatManager to create the entries + in the virtual_interfaces table, contact Quantum to + create a port and attachment the vNIC, and use the IPAM + lib to allocate IP addresses. 
+ """ instance_id = kwargs.pop('instance_id') instance_type_id = kwargs['instance_type_id'] host = kwargs.pop('host') project_id = kwargs.pop('project_id') LOG.debug(_("network allocations for instance %s"), instance_id) - # if using the create-server-networks extension, 'requested_networks' - # will be defined, otherwise, just grab relevant nets from IPAM requested_networks = kwargs.get('requested_networks') if requested_networks: @@ -116,10 +165,12 @@ class QuantumManager(manager.FlatManager): # updating the virtual_interfaces table to use UUIDs would # be one solution, but this would require significant work # elsewhere. - network_ref = db.network_get_by_uuid(context, quantum_net_id) + admin_context = context.elevated() + network_ref = db.network_get_by_uuid(admin_context, + quantum_net_id) vif_rec = manager.FlatManager.add_virtual_interface(self, - context, instance_id, network_ref['id']) + context, instance_id, network_ref['id']) # talk to Quantum API to create and attach port. q_tenant_id = project_id or FLAGS.quantum_default_tenant_id @@ -133,6 +184,18 @@ class QuantumManager(manager.FlatManager): def get_instance_nw_info(self, context, instance_id, instance_type_id, host): + """ This method is used by compute to fetch all network data + that should be used when creating the VM. + + The method simply loops through all virtual interfaces + stored in the nova DB and queries the IPAM lib to get + the associated IP data. + + The format of returned data is 'defined' by the initial + set of NetworkManagers found in nova/network/manager.py . + Ideally this 'interface' will be more formally defined + in the future. + """ network_info = [] instance = db.instance_get(context, instance_id) project_id = instance.project_id @@ -202,6 +265,11 @@ class QuantumManager(manager.FlatManager): return network_info def deallocate_for_instance(self, context, **kwargs): + """ Called when a VM is terminated. Loop through each virtual + interface in the Nova DB and remove the Quantum port and + clear the IP allocation using the IPAM. Finally, remove + the virtual interfaces from the Nova DB. + """ instance_id = kwargs.get('instance_id') project_id = kwargs.pop('project_id', None) @@ -234,11 +302,12 @@ class QuantumManager(manager.FlatManager): self._do_trigger_security_group_members_refresh_for_instance( instance_id) - # validates that this tenant has quantum networks with the associated - # UUIDs. This is called by the 'os-create-server-ext' API extension - # code so that we can return an API error code to the caller if they - # request an invalid network. def validate_networks(self, context, networks): + """ Validates that this tenant has quantum networks with the associated + UUIDs. This is called by the 'os-create-server-ext' API extension + code so that we can return an API error code to the caller if they + request an invalid network. + """ if networks is None: return diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index e2e09f139..7b7baf281 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -32,43 +32,54 @@ def get_ipam_lib(net_man): class QuantumMelangeIPAMLib: + """ Implements Quantum IP Address Management (IPAM) interface + using the Melange service, which is access using the Melange + web services API. 
+ """ def __init__(self): + """ Initialize class used to connect to Melange server""" self.m_conn = melange_connection.MelangeConnection() def create_subnet(self, context, label, project_id, quantum_net_id, priority, cidr=None, gateway_v6=None, cidr_v6=None, dns1=None, dns2=None): - tenant_id = project_id or FLAGS.quantum_default_tenant_id - if cidr: - self.m_conn.create_block(quantum_net_id, cidr, + """ Contact Melange and create a subnet for any non-NULL + IPv4 or IPv6 subnets. + + Also create a entry in the Nova networks DB, but only + to store values not represented in Melange or to + temporarily provide compatibility with Nova code that + accesses IPAM data directly via the DB (e.g., nova-api) + """ + tenant_id = project_id or FLAGS.quantum_default_tenant_id + if cidr: + self.m_conn.create_block(quantum_net_id, cidr, project_id=tenant_id, dns1=dns1, dns2=dns2) - if cidr_v6: - self.m_conn.create_block(quantum_net_id, cidr_v6, + if cidr_v6: + self.m_conn.create_block(quantum_net_id, cidr_v6, project_id=tenant_id, dns1=dns1, dns2=dns2) - # create a entry in the network table, even though - # most data is stored in melange. This is used to - # store data not kept by melange (e.g., priority) - # and to 'fake' other parts of nova (e.g., the API) - # until we get get all accesses to be via the - # network manager API. - net = {"uuid": quantum_net_id, + net = {"uuid": quantum_net_id, "project_id": project_id, "priority": priority, "label": label} - network = self.db.network_create_safe(context, net) + network = self.db.network_create_safe(context, net) def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref): + """ Pass call to allocate fixed IP on to Melange""" tenant_id = project_id or FLAGS.quantum_default_tenant_id self.m_conn.allocate_ip(quantum_net_id, vif_ref['uuid'], project_id=tenant_id, mac_address=vif_ref['address']) def get_network_id_by_cidr(self, context, cidr, project_id): + """ Find the Quantum UUID associated with a IPv4 CIDR + address for the specified tenant. + """ tenant_id = project_id or FLAGS.quantum_default_tenant_id all_blocks = self.m_conn.get_blocks(tenant_id) for b in all_blocks['ip_blocks']: @@ -77,6 +88,9 @@ class QuantumMelangeIPAMLib: raise Exception("No network found for cidr %s" % cidr) def delete_subnets_by_net_id(self, context, net_id, project_id): + """ Find Melange block associated with the Quantum UUID, + then tell Melange to delete that block. + """ admin_context = context.elevated() tenant_id = project_id or FLAGS.quantum_default_tenant_id all_blocks = self.m_conn.get_blocks(tenant_id) @@ -88,9 +102,11 @@ class QuantumMelangeIPAMLib: if network is not None: db.network_delete_safe(context, network['id']) - # get all networks with this project_id, as well as all networks - # where the project-id is not set (these are shared networks) def get_project_and_global_net_ids(self, context, project_id): + """ Fetches all networks associated with this project, or + that are "global" (i.e., have no project set). + Returns list sorted by 'priority'. + """ admin_context = context.elevated() id_proj_map = {} if project_id is None: @@ -115,12 +131,16 @@ class QuantumMelangeIPAMLib: return sorted(id_priority_map.items(), key=lambda x: id_priority_map[x[0]]) - # FIXME: (danwent) Melange actually returns the subnet info - # when we query for a particular interface. we may want to - # reworks the ipam_manager python API to let us take advantage of - # this, as right now we have to get all blocks and cycle through - # them. 
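# An illustrative sketch (helper name assumed, not from the patch) of the
# "get all blocks and cycle through them" pattern mentioned in the FIXME
# above, given a Melange response shaped like
# {'ip_blocks': [{'network_id': ..., 'cidr': ...}, ...]}:
def _blocks_for_net(all_blocks, net_id):
    # Return every IP block whose 'network_id' matches the Quantum net UUID.
    return [b for b in all_blocks.get('ip_blocks', [])
            if b['network_id'] == net_id]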
def get_subnets_by_net_id(self, context, project_id, net_id): + """ Returns information about the IPv4 and IPv6 subnets + associated with a Quantum Network UUID. + """ + + # FIXME: (danwent) Melange actually returns the subnet info + # when we query for a particular interface. we may want to + # reworks the ipam_manager python API to let us take advantage of + # this, as right now we have to get all blocks and cycle through + # them. subnet_v4 = None subnet_v6 = None tenant_id = project_id or FLAGS.quantum_default_tenant_id @@ -142,26 +162,41 @@ class QuantumMelangeIPAMLib: return (subnet_v4, subnet_v6) def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): - return self.get_ips_by_interface(context, net_id, vif_id, + """ Returns a list of IPv4 address strings associated with + the specified virtual interface. + """ + return self._get_ips_by_interface(context, net_id, vif_id, project_id, 4) def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): - return self.get_ips_by_interface(context, net_id, vif_id, + """ Returns a list of IPv6 address strings associated with + the specified virtual interface. + """ + return self._get_ips_by_interface(context, net_id, vif_id, project_id, 6) - def get_ips_by_interface(self, context, net_id, vif_id, project_id, + def _get_ips_by_interface(self, context, net_id, vif_id, project_id, ip_version): + """ Helper method to fetch v4 or v6 addresses for a particular + virtual interface. + """ tenant_id = project_id or FLAGS.quantum_default_tenant_id ip_list = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id) return [ip['address'] for ip in ip_list \ if IPNetwork(ip['address']).version == ip_version] def verify_subnet_exists(self, context, project_id, quantum_net_id): + """ Confirms that a subnet exists that is associated with the + specified Quantum Network UUID. + """ tenant_id = project_id or FLAGS.quantum_default_tenant_id v4_subnet, v6_subnet = self.get_subnets_by_net_id(context, tenant_id, quantum_net_id) return v4_subnet is not None def deallocate_ips_by_vif(self, context, project_id, net_id, vif_ref): + """ Deallocate all fixed IPs associated with the specified + virtual interface. + """ tenant_id = project_id or FLAGS.quantum_default_tenant_id self.m_conn.deallocate_ips(net_id, vif_ref['uuid'], tenant_id) diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index 6e7e5d244..ce7d3efcb 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -37,52 +37,69 @@ def get_ipam_lib(net_man): class QuantumNovaIPAMLib: + """ Implements Quantum IP Address Management (IPAM) interface + using the local Nova database. This implementation is inline + with how IPAM is used by other NetworkManagers. + """ def __init__(self, net_manager): + """ Holds a reference to the "parent" network manager, used + to take advantage of various FlatManager methods to avoid + code duplication. + """ self.net_manager = net_manager def create_subnet(self, context, label, tenant_id, quantum_net_id, priority, cidr=None, gateway_v6=None, cidr_v6=None, dns1=None, dns2=None): - admin_context = context.elevated() - subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) - networks = manager.FlatManager.create_networks(self.net_manager, + """ Re-use the basic FlatManager create_networks method to + initialize the networks and fixed_ips tables in Nova DB. + + Also stores a few more fields in the networks table that + are needed by Quantum but not the FlatManager. 
+ """ + admin_context = context.elevated() + subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) + networks = manager.FlatManager.create_networks(self.net_manager, admin_context, label, cidr, False, 1, subnet_size, cidr_v6, gateway_v6, quantum_net_id, None, dns1, dns2) - if len(networks) != 1: - raise Exception("Error creating network entry") + if len(networks) != 1: + raise Exception("Error creating network entry") - # now grab the network and update additional fields - network = networks[0] - net = {"project_id": tenant_id, + network = networks[0] + net = {"project_id": tenant_id, "priority": priority, "uuid": quantum_net_id} - db.network_update(admin_context, network['id'], net) + db.network_update(admin_context, network['id'], net) def get_network_id_by_cidr(self, context, cidr, project_id): - admin_context = context.elevated() - network = db.network_get_by_cidr(admin_context, cidr) - if not network: - raise Exception("No network with fixed_range = %s" \ - % fixed_range) - return network['uuid'] + """ Grabs Quantum network UUID based on IPv4 CIDR. """ + admin_context = context.elevated() + network = db.network_get_by_cidr(admin_context, cidr) + if not network: + raise Exception("No network with fixed_range = %s" % fixed_range) + return network['uuid'] def delete_subnets_by_net_id(self, context, net_id, project_id): - admin_context = context.elevated() - network = db.network_get_by_uuid(admin_context, net_id) - if not network: - raise Exception("No network with net_id = %s" % net_id) - manager.FlatManager.delete_network(self.net_manager, + """ Deletes a network based on Quantum UUID. Uses FlatManager + delete_network to avoid duplication. + """ + admin_context = context.elevated() + network = db.network_get_by_uuid(admin_context, net_id) + if not network: + raise Exception("No network with net_id = %s" % net_id) + manager.FlatManager.delete_network(self.net_manager, admin_context, network['cidr'], require_disassociated=False) def get_project_and_global_net_ids(self, context, project_id): - - # get all networks with this project_id, as well as all networks - # where the project-id is not set (these are shared networks) + """ Fetches all networks associated with this project, or + that are "global" (i.e., have no project set). + Returns list sorted by 'priority'. + """ admin_context = context.elevated() networks = db.project_get_networks(admin_context, project_id, False) networks.extend(db.project_get_networks(admin_context, None, False)) @@ -95,6 +112,8 @@ class QuantumNovaIPAMLib: return sorted(net_list, key=lambda x: id_priority_map[x[0]]) def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): + """ Allocates a single fixed IPv4 address for a virtual interface. + """ admin_context = context.elevated() network = db.network_get_by_uuid(admin_context, quantum_net_id) if network['cidr']: @@ -106,6 +125,9 @@ class QuantumNovaIPAMLib: db.fixed_ip_update(admin_context, address, values) def get_subnets_by_net_id(self, context, tenant_id, net_id): + """ Returns information about the IPv4 and IPv6 subnets + associated with a Quantum Network UUID. + """ n = db.network_get_by_uuid(context.elevated(), net_id) subnet_data_v4 = { 'network_id': n['uuid'], @@ -126,12 +148,18 @@ class QuantumNovaIPAMLib: return (subnet_data_v4, subnet_data_v6) def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): + """ Returns a list of IPv4 address strings associated with + the specified virtual interface, based on the fixed_ips table. 
+ """ vif_rec = db.virtual_interface_get_by_uuid(context, vif_id) fixed_ips = db.fixed_ip_get_by_virtual_interface(context, vif_rec['id']) return [f['address'] for f in fixed_ips] def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): + """ Returns a list containing a single IPv6 address strings + associated with the specified virtual interface. + """ admin_context = context.elevated() network = db.network_get_by_uuid(admin_context, net_id) vif_rec = db.virtual_interface_get_by_uuid(context, vif_id) @@ -143,10 +171,17 @@ class QuantumNovaIPAMLib: return [] def verify_subnet_exists(self, context, tenant_id, quantum_net_id): + """ Confirms that a subnet exists that is associated with the + specified Quantum Network UUID. Raises an exception if no + such subnet exists. + """ admin_context = context.elevated() - network = db.network_get_by_uuid(admin_context, quantum_net_id) + db.network_get_by_uuid(admin_context, quantum_net_id) def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref): + """ Deallocate all fixed IPs associated with the specified + virtual interface. + """ try: admin_context = context.elevated() fixed_ips = db.fixed_ip_get_by_virtual_interface(admin_context, @@ -156,5 +191,5 @@ class QuantumNovaIPAMLib: {'allocated': False, 'virtual_interface_id': None}) except exception.FixedIpNotFoundForInstance: - LOG.error(_('Failed to deallocate fixed IP for vif %s' % \ + LOG.error(_('No fixed IPs to deallocate for vif %s' % \ vif_ref['id'])) diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index a13867af2..bd3592c2c 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -38,31 +38,49 @@ flags.DEFINE_string('quantum_default_tenant_id', class QuantumClientConnection: + """ Abstracts connection to Quantum service into higher level + operations performed by the QuantumManager. + + Separating this out as a class also let's us create a 'fake' + version of this class for unit tests. + """ def __init__(self): + """ Initialize Quantum client class based on flags. """ self.client = quantum_client.Client(FLAGS.quantum_connection_host, FLAGS.quantum_connection_port, format="json", logger=LOG) def create_network(self, tenant_id, network_name): + """ Create network using specified name, return Quantum + network UUID. + """ data = {'network': {'name': network_name}} resdict = self.client.create_network(data, tenant=tenant_id) return resdict["network"]["id"] def delete_network(self, tenant_id, net_id): + """ Deletes Quantum network with specified UUID. """ self.client.delete_network(net_id, tenant=tenant_id) def network_exists(self, tenant_id, net_id): + """ Determine if a Quantum network exists for the + specified tenant. + """ try: self.client.show_network_details(net_id, tenant=tenant_id) except: - # FIXME: client lib should expose more granular exceptions + # FIXME: (danwent) client lib should expose granular exceptions # so we can confirm we're getting a 404 and not some other error return False return True def create_and_attach_port(self, tenant_id, net_id, interface_id): + """ Creates a Quantum port on the specified network, sets + status to ACTIVE to enable traffic, and attaches the + vNIC with the specified interface-id. 
+ """ LOG.debug("Connecting interface %s to net %s for %s" % \ (interface_id, net_id, tenant_id)) port_data = {'port': {'state': 'ACTIVE'}} @@ -74,15 +92,19 @@ class QuantumClientConnection: tenant=tenant_id) def detach_and_delete_port(self, tenant_id, net_id, port_id): + """ Detach and delete the specified Quantum port. """ LOG.debug("Deleting port %s on net %s for %s" % \ (port_id, net_id, tenant_id)) self.client.detach_resource(net_id, port_id, tenant=tenant_id) self.client.delete_port(net_id, port_id, tenant=tenant_id) - # FIXME: (danwent) this will be inefficient until API implements querying def get_port_by_attachment(self, tenant_id, attachment_id): - + """ Given a tenant, search for the Quantum network and port + UUID that has the specified interface-id attachment. + """ + # FIXME: (danwent) this will be inefficient until the Quantum + # API implements querying a port by the interface-id net_list_resdict = self.client.list_networks(tenant=tenant_id) for n in net_list_resdict["networks"]: net_id = n['id'] -- cgit From 605fe4f19af3af830a2a8c82809e9ce5909c602d Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Sun, 28 Aug 2011 20:00:38 -0700 Subject: restore fixed_ip_associate_pool in nova/db/sqlalchemy.py to its original form before this branch. Figured out how to make unit tests pass without requiring that this function changes --- nova/db/sqlalchemy/api.py | 4 +++- nova/tests/test_quantum.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 80ce76e8f..00af62682 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -688,8 +688,10 @@ def fixed_ip_associate(context, address, instance_id, network_id=None): def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with session.begin(): + network_or_none = or_(models.FixedIp.network_id == network_id, + models.FixedIp.network_id == None) fixed_ip_ref = session.query(models.FixedIp).\ - filter_by(network_id=network_id).\ + filter(network_or_none).\ filter_by(reserved=False).\ filter_by(deleted=False).\ filter_by(instance=None).\ diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index e7a8a01fa..3efdba910 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -17,6 +17,8 @@ from nova import context from nova import db +from nova.db.sqlalchemy import models +from nova.db.sqlalchemy.session import get_session from nova import exception from nova import log as logging from nova.network.quantum import manager as quantum_manager @@ -238,6 +240,15 @@ class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase): for n in db.network_get_all(ctx): db.network_delete_safe(ctx, n['id']) + # I've found that other unit tests have a nasty habit of + # of creating fixed IPs and not cleaning up, which can + # confuse these tests, so we clean them all. + session = get_session() + result = session.query(models.FixedIp).all() + with session.begin(): + for fip_ref in result: + session.delete(fip_ref) + # Cannot run this unit tests auotmatically for now, as it requires # melange to be running locally. 
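# A hedged sketch (helper name assumed, not from the patch) of the same
# "delete every row of a model" cleanup pattern used in the test setUp above:
def _purge_all(session, model_cls):
    # Query first, then delete each row inside a transaction, mirroring
    # the FixedIp cleanup shown in the diff.
    rows = session.query(model_cls).all()
    with session.begin():
        for row in rows:
            session.delete(row)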
# -- cgit From 81d30e900d3329f40bfd05682b73e73951c435ca Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Mon, 29 Aug 2011 08:31:56 -0700 Subject: update file name for db migrate script after merge --- .../versions/041_add_network_priority.py | 44 ---------------------- .../versions/043_add_network_priority.py | 44 ++++++++++++++++++++++ 2 files changed, 44 insertions(+), 44 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py deleted file mode 100644 index e69380199..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/041_add_network_priority.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2011 Nicira, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging -from nova import utils - - -meta = MetaData() - -# Add priority column to networks table -priority = Column('priority', Integer()) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - # grab tables and (column for dropping later) - networks = Table('networks', meta, autoload=True) - - try: - networks.create_column(priority) - except Exception: - logging.error(_("priority column not added to networks table")) - raise - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - networks.drop_column(priority) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py new file mode 100644 index 000000000..e69380199 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py @@ -0,0 +1,44 @@ +# Copyright 2011 Nicira, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging +from nova import utils + + +meta = MetaData() + +# Add priority column to networks table +priority = Column('priority', Integer()) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + networks = Table('networks', meta, autoload=True) + + try: + networks.create_column(priority) + except Exception: + logging.error(_("priority column not added to networks table")) + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + networks.drop_column(priority) -- cgit From ed1399b5a4a023b606263b5464dfe684e460a126 Mon Sep 17 00:00:00 2001 From: "danwent@gmail.com" <> Date: Mon, 29 Aug 2011 08:33:12 -0700 Subject: update file name for db migrate script after merge (again) --- .../versions/043_add_network_priority.py | 44 ---------------------- .../versions/044_add_network_priority.py | 44 ++++++++++++++++++++++ 2 files changed, 44 insertions(+), 44 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py deleted file mode 100644 index e69380199..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/043_add_network_priority.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2011 Nicira, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import * -from migrate import * - -from nova import log as logging -from nova import utils - - -meta = MetaData() - -# Add priority column to networks table -priority = Column('priority', Integer()) - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - # grab tables and (column for dropping later) - networks = Table('networks', meta, autoload=True) - - try: - networks.create_column(priority) - except Exception: - logging.error(_("priority column not added to networks table")) - raise - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - networks.drop_column(priority) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py new file mode 100644 index 000000000..e69380199 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py @@ -0,0 +1,44 @@ +# Copyright 2011 Nicira, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging +from nova import utils + + +meta = MetaData() + +# Add priority column to networks table +priority = Column('priority', Integer()) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + networks = Table('networks', meta, autoload=True) + + try: + networks.create_column(priority) + except Exception: + logging.error(_("priority column not added to networks table")) + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + networks.drop_column(priority) -- cgit From dc129eeb96a0a201fb3d032078eaf8ab192ca207 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 29 Aug 2011 09:03:56 -0700 Subject: add alias to mailmap --- .mailmap | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 5c8df80e0..3c7a31e03 100644 --- a/.mailmap +++ b/.mailmap @@ -15,6 +15,7 @@ + @@ -36,7 +37,7 @@ - + -- cgit From 6d8663a887a1241b1c3136626e7b915be860273b Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 29 Aug 2011 09:04:43 -0700 Subject: remove 'uuid' param for nova-manage network delete that I had add previously --- bin/nova-manage | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index a95890e36..051079ef3 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -818,15 +818,12 @@ class NetworkCommands(object): @args('--network', dest="fixed_range", metavar='', help='Network to delete') - @args('--uuid', dest="uuid", metavar='', - help='UUID of network to delete') def delete(self, fixed_range): """Deletes a network""" # delete the network net_manager = utils.import_object(FLAGS.network_manager) - net_manager.delete_network(context.get_admin_context(), fixed_range, - uuid=None) + net_manager.delete_network(context.get_admin_context(), fixed_range) @args('--network', dest="fixed_range", metavar='', -- cgit From 7e8e39160d1329f4923334fa822310d266651907 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 29 Aug 2011 10:43:26 -0700 Subject: access db directly in networkmanagers's delete_network method, so stubbed test call works correctly --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index d1883ff8d..b625e7823 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -791,7 +791,7 @@ class NetworkManager(manager.SchedulerDependentManager): if require_disassociated and network.project_id is not None: raise ValueError(_('Network must be disassociated from project %s' ' before delete' % network.project_id)) - self.db.network_delete_safe(context, network.id) + db.network_delete_safe(context, network.id) @property def _bottom_reserved_ips(self): # pylint: disable=R0201 -- cgit From bb4f7129e945602a8e830520e7877a33b2539530 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 29 Aug 2011 11:04:27 -0700 Subject: remove brackets from mailmap entry --- .mailmap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 3c7a31e03..a46dd6673 100644 --- a/.mailmap +++ b/.mailmap @@ -15,7 +15,7 @@ - + danwent@gmail.com -- cgit From edc43980ff56777e8200022f888c7170ca9baaea Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Tue, 30 Aug 2011 11:26:59 -0400 Subject: Use getCapabilities rather than getInfo() since some versions 
of libvirt don't provide dmi information --- nova/virt/libvirt/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 363a20ed0..6ae458537 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -196,7 +196,7 @@ class LibvirtConnection(driver.ComputeDriver): def _test_connection(self): try: - self._wrapped_conn.getInfo() + self._wrapped_conn.getCapabilities() return True except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ -- cgit From b515d427e05010ba5a984dd549cb6418629de50d Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Tue, 30 Aug 2011 18:18:23 +0200 Subject: With OS API, if the property 'ramdisk_id' isn't set on the AMI image, Nova cannot instantiate it. With EC2 API, the AMI image can be instantiated. --- nova/api/openstack/create_instance_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 483ff4985..269e46dba 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -283,7 +283,7 @@ class CreateInstanceHelper(object): try: ramdisk_id = image_meta['properties']['ramdisk_id'] except KeyError: - raise exception.RamdiskNotFoundForImage(image_id=image_id) + ramdisk_id = None return kernel_id, ramdisk_id -- cgit From e8db401156cdb842ef7bf15e44e34ac5ae672e46 Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Tue, 30 Aug 2011 18:27:48 +0200 Subject: The exception 'RamdiskNotFoundForImage' is no longer used. --- nova/exception.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 32981f4d5..aa3407480 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -402,10 +402,6 @@ class KernelNotFoundForImage(ImageNotFound): message = _("Kernel not found for image %(image_id)s.") -class RamdiskNotFoundForImage(ImageNotFound): - message = _("Ramdisk not found for image %(image_id)s.") - - class UserNotFound(NotFound): message = _("User %(user_id)s could not be found.") -- cgit From 9e3fd76a2f7d55ef111631e6ffac5575a6dd4817 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 31 Aug 2011 02:35:01 -0400 Subject: Use feedparser to parse the generated atom feeds in the tests for the versions resource. --- nova/tests/api/openstack/test_versions.py | 374 +++++++++++++----------------- 1 file changed, 167 insertions(+), 207 deletions(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 3b4396b1a..57a81738c 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License.
+import feedparser import json import stubout import webob @@ -297,36 +298,43 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual("application/atom+xml", res.content_type) - expected = """ - - About This Version - 2011-01-21T11:33:21Z - http://localhost/v1.0/ - - Rackspace - http://www.rackspace.com/ - - - - http://localhost/v1.0/ - Version v1.0 - 2011-01-21T11:33:21Z - - - - - Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) - - - """.replace(" ", "").replace("\n", "") - - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + + f = feedparser.parse(res.body) + self.assertEqual(f.feed.title, 'About This Version') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/v1.0/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.0/', + 'type': 'application/atom+xml', + 'rel': 'self'}]) + + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://localhost/v1.0/') + self.assertEqual(entry.title, 'Version v1.0') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)') + self.assertEqual(entry.links, [ + { + 'href': 'http://localhost/v1.0/', + 'type': 'application/atom+xml', + 'rel': 'self' + }, + { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ + 'cs-devguide-20110125.pdf', + 'type': 'application/pdf', + 'rel': 'describedby' + }, + { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ + 'application.wadl', + 'type': 'application/vnd.sun.wadl+xml', + 'rel': 'describedby' + }]) def test_get_version_1_1_detail_atom(self): req = webob.Request.blank('/v1.1/') @@ -334,36 +342,43 @@ class VersionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual("application/atom+xml", res.content_type) - expected = """ - - About This Version - 2011-01-21T11:33:21Z - http://localhost/v1.1/ - - Rackspace - http://www.rackspace.com/ - - - - http://localhost/v1.1/ - Version v1.1 - 2011-01-21T11:33:21Z - - - - - Version v1.1 CURRENT (2011-01-21T11:33:21Z) - - - """.replace(" ", "").replace("\n", "") - - actual = res.body.replace(" ", "").replace("\n", "") - self.assertEqual(expected, actual) + + f = feedparser.parse(res.body) + self.assertEqual(f.feed.title, 'About This Version') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/v1.1/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.1/', + 'type': 'application/atom+xml', + 'rel': 'self'}]) + + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://localhost/v1.1/') + self.assertEqual(entry.title, 'Version v1.1') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') + self.assertEqual(entry.links, [ + { + 'href': 'http://localhost/v1.1/', + 'type': 'application/atom+xml', + 'rel': 'self' + }, + { + 'href': 
'http://docs.rackspacecloud.com/servers/api/v1.1/'\ + 'cs-devguide-20110125.pdf', + 'type': 'application/pdf', + 'rel': 'describedby' + }, + { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'\ + 'application.wadl', + 'type': 'application/vnd.sun.wadl+xml', + 'rel': 'describedby' + }]) def test_get_version_list_atom(self): req = webob.Request.blank('/') @@ -372,40 +387,39 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/atom+xml") - expected = """ - - Available API Versions - 2011-01-21T11:33:21Z - http://localhost/ - - Rackspace - http://www.rackspace.com/ - - - - http://localhost/v1.0/ - Version v1.0 - 2011-01-21T11:33:21Z - - - Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) - - - - http://localhost/v1.1/ - Version v1.1 - 2011-01-21T11:33:21Z - - - Version v1.1 CURRENT (2011-01-21T11:33:21Z) - - - - """.replace(" ", "").replace("\n", "") - - actual = res.body.replace(" ", "").replace("\n", "") - - self.assertEqual(expected, actual) + f = feedparser.parse(res.body) + self.assertEqual(f.feed.title, 'Available API Versions') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.links, [{'href': 'http://localhost/', + 'type': 'application/atom+xml', + 'rel': 'self'}]) + + self.assertEqual(len(f.entries), 2) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://localhost/v1.0/') + self.assertEqual(entry.title, 'Version v1.0') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)') + self.assertEqual(entry.links, [{ + 'href': 'http://localhost/v1.0/', + 'type': 'application/atom+xml', + 'rel': 'self'}]) + entry = f.entries[1] + self.assertEqual(entry.id, 'http://localhost/v1.1/') + self.assertEqual(entry.title, 'Version v1.1') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') + self.assertEqual(entry.links, [{ + 'href': 'http://localhost/v1.1/', + 'type': 'application/atom+xml', + 'rel': 'self'}]) def test_multi_choice_image(self): req = webob.Request.blank('/images/1') @@ -779,59 +793,28 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsAtomSerializer() response = serializer.index(versions_data) - - root = etree.XML(response) - self.assertEqual(root.tag.split('}')[1], "feed") - self.assertEqual(root.tag.split('}')[0].strip('{'), - "http://www.w3.org/2005/Atom") - - children = list(root) - title = children[0] - updated = children[1] - id = children[2] - author = children[3] - link = children[4] - entry = children[5] - - self.assertEqual(title.tag.split('}')[1], 'title') - self.assertEqual(title.text, 'Available API Versions') - self.assertEqual(updated.tag.split('}')[1], 'updated') - self.assertEqual(updated.text, '2011-07-20T11:40:00Z') - self.assertEqual(id.tag.split('}')[1], 'id') - self.assertEqual(id.text, 'http://test/') - - self.assertEqual(author.tag.split('}')[1], 'author') - author_name = list(author)[0] - author_uri = list(author)[1] - self.assertEqual(author_name.tag.split('}')[1], 'name') - self.assertEqual(author_name.text, 'Rackspace') - self.assertEqual(author_uri.tag.split('}')[1], 
'uri') - self.assertEqual(author_uri.text, 'http://www.rackspace.com/') - - self.assertEqual(link.get('href'), 'http://test/') - self.assertEqual(link.get('rel'), 'self') - - self.assertEqual(entry.tag.split('}')[1], 'entry') - entry_children = list(entry) - entry_id = entry_children[0] - entry_title = entry_children[1] - entry_updated = entry_children[2] - entry_link = entry_children[3] - entry_content = entry_children[4] - self.assertEqual(entry_id.tag.split('}')[1], "id") - self.assertEqual(entry_id.text, "http://test/2.9.8") - self.assertEqual(entry_title.tag.split('}')[1], "title") - self.assertEqual(entry_title.get('type'), "text") - self.assertEqual(entry_title.text, "Version 2.9.8") - self.assertEqual(entry_updated.tag.split('}')[1], "updated") - self.assertEqual(entry_updated.text, "2011-07-20T11:40:00Z") - self.assertEqual(entry_link.tag.split('}')[1], "link") - self.assertEqual(entry_link.get('href'), "http://test/2.9.8") - self.assertEqual(entry_link.get('rel'), "self") - self.assertEqual(entry_content.tag.split('}')[1], "content") - self.assertEqual(entry_content.get('type'), "text") - self.assertEqual(entry_content.text, - "Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)") + f = feedparser.parse(response) + + self.assertEqual(f.feed.title, 'Available API Versions') + self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z') + self.assertEqual(f.feed.id, 'http://test/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.links, [{'href': 'http://test/', + 'type': 'application/atom+xml', + 'rel': 'self'}]) + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://test/2.9.8') + self.assertEqual(entry.title, 'Version 2.9.8') + self.assertEqual(entry.updated, '2011-07-20T11:40:00Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)') + self.assertEqual(entry.links, [{ + 'href': 'http://test/2.9.8', + 'type': 'application/atom+xml', + 'rel': 'self'}]) def test_version_detail_atom_serializer(self): versions_data = { @@ -872,63 +855,40 @@ class VersionsSerializerTests(test.TestCase): serializer = versions.VersionsAtomSerializer() response = serializer.show(versions_data) - - root = etree.XML(response) - self.assertEqual(root.tag.split('}')[1], "feed") - self.assertEqual(root.tag.split('}')[0].strip('{'), - "http://www.w3.org/2005/Atom") - - children = list(root) - title = children[0] - updated = children[1] - id = children[2] - author = children[3] - link = children[4] - entry = children[5] - - self.assertEqual(root.tag.split('}')[1], 'feed') - self.assertEqual(title.tag.split('}')[1], 'title') - self.assertEqual(title.text, 'About This Version') - self.assertEqual(updated.tag.split('}')[1], 'updated') - self.assertEqual(updated.text, '2011-01-21T11:33:21Z') - self.assertEqual(id.tag.split('}')[1], 'id') - self.assertEqual(id.text, 'http://localhost/v1.1/') - - self.assertEqual(author.tag.split('}')[1], 'author') - author_name = list(author)[0] - author_uri = list(author)[1] - self.assertEqual(author_name.tag.split('}')[1], 'name') - self.assertEqual(author_name.text, 'Rackspace') - self.assertEqual(author_uri.tag.split('}')[1], 'uri') - self.assertEqual(author_uri.text, 'http://www.rackspace.com/') - - self.assertEqual(link.get('href'), - 'http://localhost/v1.1/') - self.assertEqual(link.get('rel'), 'self') - - self.assertEqual(entry.tag.split('}')[1], 'entry') - 
entry_children = list(entry) - entry_id = entry_children[0] - entry_title = entry_children[1] - entry_updated = entry_children[2] - entry_links = (entry_children[3], entry_children[4], entry_children[5]) - entry_content = entry_children[6] - - self.assertEqual(entry_id.tag.split('}')[1], "id") - self.assertEqual(entry_id.text, - "http://localhost/v1.1/") - self.assertEqual(entry_title.tag.split('}')[1], "title") - self.assertEqual(entry_title.get('type'), "text") - self.assertEqual(entry_title.text, "Version v1.1") - self.assertEqual(entry_updated.tag.split('}')[1], "updated") - self.assertEqual(entry_updated.text, "2011-01-21T11:33:21Z") - - for i, link in enumerate(versions_data["version"]["links"]): - self.assertEqual(entry_links[i].tag.split('}')[1], "link") - for key, val in versions_data["version"]["links"][i].items(): - self.assertEqual(entry_links[i].get(key), val) - - self.assertEqual(entry_content.tag.split('}')[1], "content") - self.assertEqual(entry_content.get('type'), "text") - self.assertEqual(entry_content.text, - "Version v1.1 CURRENT (2011-01-21T11:33:21Z)") + f = feedparser.parse(response) + + self.assertEqual(f.feed.title, 'About This Version') + self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') + self.assertEqual(f.feed.id, 'http://localhost/v1.1/') + self.assertEqual(f.feed.author, 'Rackspace') + self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.1/', + 'type': 'application/atom+xml', + 'rel': 'self'}]) + self.assertEqual(len(f.entries), 1) + entry = f.entries[0] + self.assertEqual(entry.id, 'http://localhost/v1.1/') + self.assertEqual(entry.title, 'Version v1.1') + self.assertEqual(entry.updated, '2011-01-21T11:33:21Z') + self.assertEqual(len(entry.content), 1) + self.assertEqual(entry.content[0].value, + 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') + self.assertEqual(entry.links, [ + { + 'rel': 'self', + 'type': 'application/atom+xml', + 'href': 'http://localhost/v1.1/', + }, + { + 'rel': 'describedby', + 'type': 'application/pdf', + 'href': 'http://docs.rackspacecloud.com/' + 'servers/api/v1.1/cs-devguide-20110125.pdf', + }, + { + 'rel': 'describedby', + 'type': 'application/vnd.sun.wadl+xml', + 'href': 'http://docs.rackspacecloud.com/' + 'servers/api/v1.1/application.wadl', + }, + ]) -- cgit From 13d68b59833f55d69497a0f5ac5ec8904af9ab0a Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 31 Aug 2011 03:40:42 -0400 Subject: Code cleanup. 
--- nova/tests/api/openstack/test_versions.py | 94 +++++++++++++++---------------- 1 file changed, 46 insertions(+), 48 deletions(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 57a81738c..129d1e69d 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -243,13 +243,14 @@ class VersionsTest(test.TestCase): self.assertTrue(version.xpath('/ns:version', namespaces=NS)) media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) - self._compare_media_types(media_types, expected['media-types']) + self.assertTrue(_compare_media_types(media_types, + expected['media-types'])) for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) links = version.xpath('atom:link', namespaces=NS) - self._compare_links(links, + self.assertTrue(_compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v1.0/'}] - + expected['links']) + + expected['links'])) def test_get_version_1_1_detail_xml(self): req = webob.Request.blank('/v1.1/') @@ -263,13 +264,14 @@ class VersionsTest(test.TestCase): self.assertTrue(version.xpath('/ns:version', namespaces=NS)) media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) - self._compare_media_types(media_types, expected['media-types']) + self.assertTrue(_compare_media_types(media_types, + expected['media-types'])) for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) links = version.xpath('atom:link', namespaces=NS) - self._compare_links(links, + self.assertTrue(_compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v1.1/'}] - + expected['links']) + + expected['links'])) def test_get_version_list_xml(self): req = webob.Request.blank('/') @@ -289,8 +291,8 @@ class VersionsTest(test.TestCase): for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) (link,) = version.xpath('atom:link', namespaces=NS) - self._compare_links(link, - [{'rel': 'self', 'href': 'http://localhost/%s/' % v}]) + self.assertTrue(_compare_links(link, + [{'rel': 'self', 'href': 'http://localhost/%s/' % v}])) def test_get_version_1_0_detail_atom(self): req = webob.Request.blank('/v1.0/') @@ -568,18 +570,6 @@ class VersionsTest(test.TestCase): self.assertDictMatch(expected, json.loads(res.body)) - def _compare_media_types(self, actual, expected): - for elem, data in zip(actual, expected): - self.assertEqual(elem.get('base'), data['base']) - self.assertEqual(elem.get('type'), data['type']) - - def _compare_links(self, actual, expected): - for elem, data in zip(actual, expected): - self.assertEqual(elem.get('rel'), data['rel']) - self.assertEqual(elem.get('href'), data['href']) - if 'type' in data: - self.assertEqual(elem.get('type'), data['type']) - class VersionsViewBuilderTests(test.TestCase): def test_view_builder(self): @@ -648,21 +638,19 @@ class VersionsSerializerTests(test.TestCase): response = serializer.index(versions_data) root = etree.XML(response) - self.assertEqual(root.tag.split('}')[1], "versions") - self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - version = list(root)[0] - self.assertEqual(version.tag.split('}')[1], "version") - self.assertEqual(version.get('id'), - versions_data['versions'][0]['id']) + self.assertTrue(root.xpath('/ns:versions', namespaces=NS)) + version_elems = root.xpath('ns:version', namespaces=NS) + self.assertEqual(len(version_elems), 1) + version = version_elems[0] + 
self.assertEqual(version.get('id'), versions_data['versions'][0]['id']) self.assertEqual(version.get('status'), versions_data['versions'][0]['status']) - link = list(version)[0] - - self.assertEqual(link.tag.split('}')[1], "link") - self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) - for key, val in versions_data['versions'][0]['links'][0].items(): - self.assertEqual(link.get(key), val) + (link,) = version.xpath('atom:link', namespaces=NS) + self.assertTrue(_compare_links(link, [{ + 'rel': 'self', + 'href': 'http://test/2.7.1', + 'type': 'application/atom+xml'}])) def test_versions_multi_xml_serializer(self): versions_data = { @@ -686,10 +674,8 @@ class VersionsSerializerTests(test.TestCase): response = serializer.multi(versions_data) root = etree.XML(response) - self.assertEqual(root.tag.split('}')[1], "choices") - self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - version = list(root)[0] - self.assertEqual(version.tag.split('}')[1], "version") + self.assertTrue(root.xpath('/ns:choices', namespaces=NS)) + (version,) = root.xpath('ns:version', namespaces=NS) self.assertEqual(version.get('id'), versions_data['choices'][0]['id']) self.assertEqual(version.get('status'), versions_data['choices'][0]['status']) @@ -698,19 +684,14 @@ class VersionsSerializerTests(test.TestCase): media_type_nodes = list(media_types) self.assertEqual(media_types.tag.split('}')[1], "media-types") - set_types = versions_data['choices'][0]['media-types'] - for i, type in enumerate(set_types): - node = media_type_nodes[i] - self.assertEqual(node.tag.split('}')[1], "media-type") - for key, val in set_types[i].items(): - self.assertEqual(node.get(key), val) - - link = list(version)[1] + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(_compare_media_types(media_types, + versions_data['choices'][0]['media-types'])) - self.assertEqual(link.tag.split('}')[1], "link") - self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) - for key, val in versions_data['choices'][0]['links'][0].items(): - self.assertEqual(link.get(key), val) + (link,) = version.xpath('atom:link', namespaces=NS) + self.assertTrue(_compare_links(link, + versions_data['choices'][0]['links'])) def test_version_detail_xml_serializer(self): version_data = { @@ -892,3 +873,20 @@ class VersionsSerializerTests(test.TestCase): 'servers/api/v1.1/application.wadl', }, ]) + + +def _compare_links(actual, expected): + for elem, data in zip(actual, expected): + for key in ('rel', 'href', 'type'): + if elem.get(key) != data.get(key): + return False + return True + + +def _compare_media_types(actual, expected): + for elem, data in zip(actual, expected): + for key in ('base', 'type'): + if elem.get(key) != data.get(key): + return False + return True + -- cgit From ced3ea3e8d7cf02f988d968d6078182815226719 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 31 Aug 2011 12:23:43 -0700 Subject: fix for chris behrens' comment - move tenant_id => project_id mapping to compute.api.get_all --- nova/api/openstack/servers.py | 5 ----- nova/compute/api.py | 1 + nova/tests/api/openstack/test_servers.py | 11 ++++++----- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 3e76fa1a0..5bbb4e52e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -107,11 +107,6 @@ class Controller(object): LOG.error(reason) raise exception.InvalidInput(reason=reason) - # translate tenant_id filter to 
internal project_id - if 'tenant_id' in search_opts: - search_opts['project_id'] = search_opts['tenant_id'] - del search_opts['tenant_id'] - # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. diff --git a/nova/compute/api.py b/nova/compute/api.py index 60a13631a..95d6a901d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -854,6 +854,7 @@ class API(base.Base): 'image': 'image_ref', 'name': 'display_name', 'instance_name': 'name', + 'tenant_id': 'project_id', 'recurse_zones': None, 'flavor': _remap_flavor_filter, 'fixed_ip': _remap_fixed_ip_filter} diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 435644dcc..1ecb35f63 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1200,13 +1200,14 @@ class ServersTest(test.TestCase): self.assertEqual(servers[0]['id'], 100) def test_tenant_id_filter_converts_to_project_id_for_admin(self): - def fake_get_all(compute_self, context, search_opts=None): - self.assertNotEqual(search_opts, None) - self.assertEqual(search_opts['project_id'], 'faketenant') - self.assertFalse(search_opts.get('tenant_id')) + def fake_get_all(context, filters=None): + self.assertNotEqual(filters, None) + self.assertEqual(filters['project_id'], 'faketenant') + self.assertFalse(filters.get('tenant_id')) return [stub_instance(100)] - self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + self.stubs.Set(nova.db.api, 'instance_get_all_by_filters', + fake_get_all) self.flags(allow_admin_api=True) req = webob.Request.blank('/v1.1/fake/servers?tenant_id=faketenant') -- cgit From 162f1750344a4e1812a15ec132fefd6e1fd47ca5 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 31 Aug 2011 18:38:55 -0400 Subject: Updating test for xml to use lxml. 
--- nova/tests/api/openstack/test_versions.py | 61 ++++++++++++++++--------------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 129d1e69d..03a47bf24 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -483,28 +483,32 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 300) self.assertEqual(res.content_type, "application/xml") - expected = """ - - - - - - - - - - - - - - - - """.replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, - wsgi.XMLNS_ATOM) + root = etree.XML(res.body) + self.assertTrue(root.xpath('/ns:choices', namespaces=NS)) + versions = root.xpath('ns:version', namespaces=NS) + self.assertEqual(len(versions), 2) + + version = versions[0] + self.assertEqual(version.get('id'), 'v1.1'); + self.assertEqual(version.get('status'), 'CURRENT') + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(_compare_media_types(media_types, + VERSIONS['v1.1']['media-types'])) + links = version.xpath('atom:link', namespaces=NS) + self.assertTrue(_compare_links(links, + [{'rel': 'self', 'href': 'http://localhost/v1.1/images/1'}])) + + version = versions[1] + self.assertEqual(version.get('id'), 'v1.0'); + self.assertEqual(version.get('status'), 'DEPRECATED') + media_types = version.xpath('ns:media-types/ns:media-type', + namespaces=NS) + self.assertTrue(_compare_media_types(media_types, + VERSIONS['v1.0']['media-types'])) + links = version.xpath('atom:link', namespaces=NS) + self.assertTrue(_compare_links(links, + [{'rel': 'self', 'href': 'http://localhost/v1.0/images/1'}])) def test_multi_choice_server_atom(self): """ @@ -876,17 +880,16 @@ class VersionsSerializerTests(test.TestCase): def _compare_links(actual, expected): - for elem, data in zip(actual, expected): - for key in ('rel', 'href', 'type'): - if elem.get(key) != data.get(key): - return False - return True + return _compare_tree_to_dict(actual, expected, ('rel', 'href', 'type')) def _compare_media_types(actual, expected): + return _compare_tree_to_dict(actual, expected, ('base', 'type')) + + +def _compare_tree_to_dict(actual, expected, keys): for elem, data in zip(actual, expected): - for key in ('base', 'type'): + for key in keys: if elem.get(key) != data.get(key): return False return True - -- cgit From d99588a4caf855f3876ea83fa0d8517a77727aef Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Wed, 31 Aug 2011 19:32:55 -0400 Subject: Updated VersionsAtomSerializer.index to use lxml.etree to generate atom feed. 
--- nova/api/openstack/versions.py | 102 ++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 57 deletions(-) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index 16a4e8bfd..0766e1eb7 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -162,39 +162,6 @@ class VersionsRequestDeserializer(wsgi.RequestDeserializer): class VersionsXMLSerializer(wsgi.XMLDictSerializer): - def _create_media_types(self, media_types): - base = self._xml_doc.createElement('media-types') - for type in media_types: - node = self._xml_doc.createElement('media-type') - node.setAttribute('base', type['base']) - node.setAttribute('type', type['type']) - base.appendChild(node) - - return base - - def _create_version_node(self, version, create_ns=False): - version_node = self._xml_doc.createElement('version') - if create_ns: - xmlns = wsgi.XMLNS_V11 - xmlns_atom = wsgi.XMLNS_ATOM - version_node.setAttribute('xmlns', xmlns) - version_node.setAttribute('xmlns:atom', xmlns_atom) - - version_node.setAttribute('id', version['id']) - version_node.setAttribute('status', version['status']) - if 'updated' in version: - version_node.setAttribute('updated', version['updated']) - - if 'media-types' in version: - media_types = self._create_media_types(version['media-types']) - version_node.appendChild(media_types) - - link_nodes = self._create_link_nodes(self._xml_doc, version['links']) - for link in link_nodes: - version_node.appendChild(link) - - return version_node - def _populate_version(self, version_node, version): version_node.set('id', version['id']) version_node.set('status', version['status']) @@ -237,6 +204,9 @@ class VersionsXMLSerializer(wsgi.XMLDictSerializer): class VersionsAtomSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_ATOM} + #TODO(wwolf): this is temporary until we get rid of toprettyxml # in the base class (XMLDictSerializer), which I plan to do in # another branch @@ -301,31 +271,53 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): root.appendChild(author) root.appendChild(link) - def _create_list_meta(self, root, versions): - title = self._create_text_elem('title', "Available API Versions", - type='text') + def _create_feed(self, versions): + feed = etree.Element('feed', nsmap=self.NSMAP) + title = etree.SubElement(feed, 'title') + title.set('type', 'text') + title.text = 'Available API Versions' + # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) - updated = self._create_text_elem('updated', recent) + etree.SubElement(feed, 'updated').text = recent base_url = self._get_base_url(versions[0]['links'][0]['href']) - id = self._create_text_elem('id', base_url) + etree.SubElement(feed, 'id').text = base_url - link = self._xml_doc.createElement('link') - link.setAttribute('rel', 'self') - link.setAttribute('href', base_url) + link = etree.SubElement(feed, 'link') + link.set('rel', 'self') + link.set('href', base_url) - author = self._xml_doc.createElement('author') - author_name = self._create_text_elem('name', 'Rackspace') - author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/') - author.appendChild(author_name) - author.appendChild(author_uri) + author = etree.SubElement(feed, 'author') + etree.SubElement(author, 'name').text = 'Rackspace' + etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' - root.appendChild(title) - root.appendChild(updated) - root.appendChild(id) - root.appendChild(author) - root.appendChild(link) 
+ for version in versions: + feed.append(self._create_version_entry(version)) + + return feed + + def _create_version_entry(self, version): + entry = etree.Element('entry') + etree.SubElement(entry, 'id').text = version['links'][0]['href'] + title = etree.SubElement(entry, 'title') + title.set('type', 'text') + title.text = 'Version %s' % version['id'] + etree.SubElement(entry, 'updated').text = version['updated'] + + for link in version['links']: + link_elem = etree.SubElement(entry, 'link') + link_elem.set('rel', link['rel']) + link_elem.set('href', link['href']) + if 'type' in link: + link_elem.set('type', link['type']) + + content = etree.SubElement(entry, 'content') + content.set('type', 'text') + content.text = 'Version %s %s (%s)' % (version['id'], + version['status'], + version['updated']) + return entry def _create_version_entries(self, root, versions): for version in versions: @@ -361,12 +353,8 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): root.appendChild(entry) def index(self, data): - self._xml_doc = minidom.Document() - node = self._xml_doc.createElementNS(self.xmlns, 'feed') - self._create_list_meta(node, data['versions']) - self._create_version_entries(node, data['versions']) - - return self.to_xml_string(node) + feed = self._create_feed(data['versions']) + return etree.tostring(feed, encoding='UTF-8') def show(self, data): self._xml_doc = minidom.Document() -- cgit From e1cd8f036f34fc622416e74a302959c9e50a798c Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 31 Aug 2011 17:06:15 -0700 Subject: Fix for LP Bug #838251 --- nova/api/ec2/cloud.py | 12 ++---------- nova/api/openstack/contrib/createserverext.py | 24 +++++++++++++++++++----- nova/api/openstack/servers.py | 18 ++++++++++++++++++ nova/utils.py | 9 +++++++++ 4 files changed, 48 insertions(+), 15 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9aebf92e3..d3b3a21ef 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -995,14 +995,6 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} - @staticmethod - def _convert_to_set(lst, label): - if lst is None or lst == []: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] - def _format_kernel_id(self, instance_ref, result, key): kernel_id = instance_ref['kernel_id'] if kernel_id is None: @@ -1160,7 +1152,7 @@ class CloudController(object): if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) - result['groupSet'] = CloudController._convert_to_set( + result['groupSet'] = utils.convert_to_set( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, @@ -1224,7 +1216,7 @@ class CloudController(object): i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) - i['productCodesSet'] = self._convert_to_set([], 'product_codes') + i['productCodesSet'] = utils.convert_to_set([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] diff --git a/nova/api/openstack/contrib/createserverext.py b/nova/api/openstack/contrib/createserverext.py index ba72fdb0b..4cc093891 100644 --- a/nova/api/openstack/contrib/createserverext.py +++ b/nova/api/openstack/contrib/createserverext.py @@ -14,18 +14,32 @@ # License for the specific language governing permissions and limitations # under 
the License +from nova import utils from nova.api.openstack import create_instance_helper as helper from nova.api.openstack import extensions from nova.api.openstack import servers from nova.api.openstack import wsgi -class Createserverext(extensions.ExtensionDescriptor): - """The servers create ext +class CreateServerController(servers.ControllerV11): + def _build_view(self, req, instance, is_detail=False): + server = super(CreateServerController, self)._build_view(req, + instance, + is_detail) + if is_detail: + self._build_security_groups(server['server'], instance) + return server + + def _build_security_groups(self, response, inst): + sg_names = [] + if inst.get('security_groups'): + sg_names = [security_group['name'] for security_group in \ + inst['security_groups']] + response['security_groups'] = utils.convert_to_set(sg_names, 'name') - Exposes addFixedIp and removeFixedIp actions on servers. - """ +class Createserverext(extensions.ExtensionDescriptor): + """The servers create ext""" def get_name(self): return "Createserverext" @@ -58,7 +72,7 @@ class Createserverext(extensions.ExtensionDescriptor): deserializer = wsgi.RequestDeserializer(body_deserializers) res = extensions.ResourceExtension('os-create-server-ext', - controller=servers.ControllerV11(), + controller=CreateServerController(), deserializer=deserializer, serializer=serializer) resources.append(res) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 27c67e79e..e2cb46165 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -912,6 +912,11 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): server['addresses']) server_node.appendChild(addresses_node) + if 'security_groups' in server: + security_groups_node = self._create_security_groups_node(xml_doc, + server['security_groups']) + server_node.appendChild(security_groups_node) + return server_node def _server_list_to_xml(self, xml_doc, servers, detailed): @@ -964,6 +969,19 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): server_dict['server']) return self.to_xml_string(node, True) + def _security_group_to_xml(self, doc, security_group): + node = doc.createElement('security_group') + node.setAttribute('name', str(security_group.get('name'))) + return node + + def _create_security_groups_node(self, xml_doc, security_groups): + security_groups_node = xml_doc.createElement('security_groups') + if security_groups: + for security_group in security_groups: + node = self._security_group_to_xml(xml_doc, security_group) + security_groups_node.appendChild(node) + return security_groups_node + def create_resource(version='1.0'): controller = { diff --git a/nova/utils.py b/nova/utils.py index 21e6221b2..4855f6fd6 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -901,3 +901,12 @@ def monkey_patch(): func = import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key,\ decorator("%s.%s" % (module, key), func)) + + +def convert_to_set(lst, label): + """Convert a value or list into a set""" + if lst is None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] -- cgit From 20beec509aff1bb3a30e9f1d95d3e2825f2b38ea Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 31 Aug 2011 17:30:11 -0700 Subject: Fix for LP Bug #838466 --- nova/compute/api.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e045ef3de..6806522f7 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -383,10 
+383,6 @@ class API(base.Base): If you are changing this method, be sure to update both call paths. """ - instance = dict(launch_index=num, **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - elevated = context.elevated() if security_group is None: security_group = ['default'] @@ -400,6 +396,10 @@ class API(base.Base): security_group_name) security_groups.append(group['id']) + instance = dict(launch_index=num, **base_options) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + for security_group_id in security_groups: self.db.instance_add_security_group(elevated, instance_id, -- cgit From f4dc231069c530f8f6055b1b7fa006750795b6e4 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Wed, 31 Aug 2011 18:54:15 -0700 Subject: Add comment for an uncommon failure case that we need to fix --- nova/network/quantum/manager.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index c03622218..2fd9175ae 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -213,6 +213,12 @@ class QuantumManager(manager.FlatManager): net_id, port_id = self.q_conn.get_port_by_attachment( q_tenant_id, vif['uuid']) if not net_id: + # TODO(bgh): We need to figure out a way to tell if we + # should actually be raising this exception or not. + # In the case that a VM spawn failed it may not have + # attached the vif and raising the exception here + # prevents deltion of the VM. In that case we should + # probably just log, continue, and move on. raise Exception(_("No network for for virtual interface %s") % vif['uuid']) (v4_subnet, v6_subnet) = self.ipam.get_subnets_by_net_id(context, -- cgit From 13c74c252b0b7f900cde6a09201ea01c389f73a8 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 31 Aug 2011 22:43:26 -0400 Subject: move GlanceImageService tests to proper module; remove translation of non-standard image attributes to properties; ensure all image properties are available, defaulting to None if not provided --- nova/api/openstack/images.py | 2 +- nova/image/glance.py | 60 ++++-- nova/tests/api/openstack/test_images.py | 279 +--------------------------- nova/tests/image/test_glance.py | 318 +++++++++++++++++++++++++++++++- 4 files changed, 363 insertions(+), 296 deletions(-) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 1c8fc10c9..fcaa94651 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -50,7 +50,7 @@ class Controller(object): """Initialize new `ImageController`. 
:param compute_service: `nova.compute.api:API` - :param image_service: `nova.image.service:BaseImageService` + :param image_service: `nova.image.glance:GlanceImageService` """ self._compute_service = compute_service or compute.API() diff --git a/nova/image/glance.py b/nova/image/glance.py index 7233eb18d..2e0ed555d 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -57,17 +57,9 @@ def pick_glance_api_server(): return host, port -class GlanceImageService(service.BaseImageService): +class GlanceImageService(object): """Provides storage and retrieval of disk image objects within Glance.""" - GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format', - 'container_format', 'checksum'] - - # NOTE(sirp): Overriding to use _translate_to_service provided by - # BaseImageService - SERVICE_IMAGE_ATTRS = service.BaseImageService.BASE_IMAGE_ATTRS +\ - GLANCE_ONLY_ATTRS - def __init__(self, client=None): self._client = client @@ -266,20 +258,46 @@ class GlanceImageService(service.BaseImageService): @classmethod def _translate_to_service(cls, image_meta): - image_meta = super(GlanceImageService, - cls)._translate_to_service(image_meta) image_meta = _convert_to_string(image_meta) return image_meta @classmethod def _translate_to_base(cls, image_meta): """Override translation to handle conversion to datetime objects.""" - image_meta = service.BaseImageService._propertify_metadata( - image_meta, cls.SERVICE_IMAGE_ATTRS) + image_meta = _limit_attributes(image_meta) image_meta = _convert_timestamps_to_datetimes(image_meta) image_meta = _convert_from_string(image_meta) return image_meta + + @staticmethod + def _is_image_available(context, image_meta): + """Check image availability. + + Images are always available if they are public or if the user is an + admin. + + Otherwise, we filter by project_id (if present) and then fall-back to + images owned by user. + + """ + # FIXME(sirp): We should be filtering by user_id on the Glance side + # for security; however, we can't do that until we get authn/authz + # sorted out. Until then, filtering in Nova. 
+ if image_meta['is_public'] or context.is_admin: + return True + + properties = image_meta['properties'] + + if context.project_id and ('project_id' in properties): + return str(properties['project_id']) == str(context.project_id) + + try: + user_id = properties['user_id'] + except KeyError: + return False + + return str(user_id) == str(context.user_id) + # utility functions def _convert_timestamps_to_datetimes(image_meta): @@ -338,3 +356,19 @@ def _convert_from_string(metadata): def _convert_to_string(metadata): return _convert(_json_dumps, metadata) + + + +def _limit_attributes(image_meta): + IMAGE_ATTRIBUTES = ['size', 'location', 'disk_format', + 'container_format', 'checksum', 'id', + 'name', 'created_at', 'updated_at', + 'deleted_at', 'deleted', 'status', + 'is_public'] + output = {} + for attr in IMAGE_ATTRIBUTES: + output[attr] = image_meta.get(attr) + + output['properties'] = image_meta.get('properties', {}) + + return output diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2a7cfc382..b9bd18eda 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -41,280 +41,7 @@ from nova.api.openstack import images from nova.tests.api.openstack import fakes -class _BaseImageServiceTests(test.TestCase): - """Tasks to test for all image services""" - - def __init__(self, *args, **kwargs): - super(_BaseImageServiceTests, self).__init__(*args, **kwargs) - self.service = None - self.context = None - - def test_create(self): - fixture = self._make_fixture('test image') - num_images = len(self.service.index(self.context)) - - image_id = self.service.create(self.context, fixture)['id'] - - self.assertNotEquals(None, image_id) - self.assertEquals(num_images + 1, - len(self.service.index(self.context))) - - def test_create_and_show_non_existing_image(self): - fixture = self._make_fixture('test image') - num_images = len(self.service.index(self.context)) - - image_id = self.service.create(self.context, fixture)['id'] - - self.assertNotEquals(None, image_id) - self.assertRaises(exception.NotFound, - self.service.show, - self.context, - 'bad image id') - - def test_create_and_show_non_existing_image_by_name(self): - fixture = self._make_fixture('test image') - num_images = len(self.service.index(self.context)) - - image_id = self.service.create(self.context, fixture)['id'] - - self.assertNotEquals(None, image_id) - self.assertRaises(exception.ImageNotFound, - self.service.show_by_name, - self.context, - 'bad image id') - - def test_update(self): - fixture = self._make_fixture('test image') - image_id = self.service.create(self.context, fixture)['id'] - fixture['status'] = 'in progress' - - self.service.update(self.context, image_id, fixture) - - new_image_data = self.service.show(self.context, image_id) - self.assertEquals('in progress', new_image_data['status']) - - def test_delete(self): - fixture1 = self._make_fixture('test image 1') - fixture2 = self._make_fixture('test image 2') - fixtures = [fixture1, fixture2] - - num_images = len(self.service.index(self.context)) - self.assertEquals(0, num_images, str(self.service.index(self.context))) - - ids = [] - for fixture in fixtures: - new_id = self.service.create(self.context, fixture)['id'] - ids.append(new_id) - - num_images = len(self.service.index(self.context)) - self.assertEquals(2, num_images, str(self.service.index(self.context))) - - self.service.delete(self.context, ids[0]) - - num_images = len(self.service.index(self.context)) - 
self.assertEquals(1, num_images) - - def test_index(self): - fixture = self._make_fixture('test image') - image_id = self.service.create(self.context, fixture)['id'] - image_metas = self.service.index(self.context) - expected = [{'id': 'DONTCARE', 'name': 'test image'}] - self.assertDictListMatch(image_metas, expected) - - @staticmethod - def _make_fixture(name): - fixture = {'name': name, - 'updated': None, - 'created': None, - 'status': None, - 'is_public': True} - return fixture - - -class GlanceImageServiceTest(_BaseImageServiceTests): - - """Tests the Glance image service, in particular that metadata translation - works properly. - - At a high level, the translations involved are: - - 1. Glance -> ImageService - This is needed so we can support - multple ImageServices (Glance, Local, etc) - - 2. ImageService -> API - This is needed so we can support multple - APIs (OpenStack, EC2) - """ - def setUp(self): - super(GlanceImageServiceTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.stub_out_glance(self.stubs) - fakes.stub_out_compute_api_snapshot(self.stubs) - service_class = 'nova.image.glance.GlanceImageService' - self.service = utils.import_object(service_class) - self.context = context.RequestContext('fake', 'fake') - self.service.delete_all() - self.sent_to_glance = {} - fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance) - - def tearDown(self): - self.stubs.UnsetAll() - super(GlanceImageServiceTest, self).tearDown() - - def test_create_with_instance_id(self): - """Ensure instance_id is persisted as an image-property""" - fixture = {'name': 'test image', - 'is_public': False, - 'properties': {'instance_id': '42', 'user_id': 'fake'}} - - image_id = self.service.create(self.context, fixture)['id'] - expected = fixture - self.assertDictMatch(self.sent_to_glance['metadata'], expected) - - image_meta = self.service.show(self.context, image_id) - expected = {'id': image_id, - 'name': 'test image', - 'is_public': False, - 'properties': {'instance_id': '42', 'user_id': 'fake'}} - self.assertDictMatch(image_meta, expected) - - image_metas = self.service.detail(self.context) - self.assertDictMatch(image_metas[0], expected) - - def test_create_without_instance_id(self): - """ - Ensure we can create an image without having to specify an - instance_id. Public images are an example of an image not tied to an - instance. 
- """ - fixture = {'name': 'test image'} - image_id = self.service.create(self.context, fixture)['id'] - - expected = {'name': 'test image', 'properties': {}} - self.assertDictMatch(self.sent_to_glance['metadata'], expected) - - def test_index_default_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.index(self.context) - i = 0 - for meta in image_metas: - expected = {'id': 'DONTCARE', - 'name': 'TestImage %d' % (i)} - self.assertDictMatch(meta, expected) - i = i + 1 - - def test_index_marker(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.index(self.context, marker=ids[1]) - self.assertEquals(len(image_metas), 8) - i = 2 - for meta in image_metas: - expected = {'id': 'DONTCARE', - 'name': 'TestImage %d' % (i)} - self.assertDictMatch(meta, expected) - i = i + 1 - - def test_index_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.index(self.context, limit=3) - self.assertEquals(len(image_metas), 3) - - def test_index_marker_and_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.index(self.context, marker=ids[3], limit=1) - self.assertEquals(len(image_metas), 1) - i = 4 - for meta in image_metas: - expected = {'id': 'DONTCARE', - 'name': 'TestImage %d' % (i)} - self.assertDictMatch(meta, expected) - i = i + 1 - - def test_detail_marker(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.detail(self.context, marker=ids[1]) - self.assertEquals(len(image_metas), 8) - i = 2 - for meta in image_metas: - expected = { - 'id': 'DONTCARE', - 'status': None, - 'is_public': True, - 'name': 'TestImage %d' % (i), - 'properties': { - 'updated': None, - 'created': None, - }, - } - - self.assertDictMatch(meta, expected) - i = i + 1 - - def test_detail_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.detail(self.context, limit=3) - self.assertEquals(len(image_metas), 3) - - def test_detail_marker_and_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.detail(self.context, marker=ids[3], limit=3) - self.assertEquals(len(image_metas), 3) - i = 4 - for meta in image_metas: - expected = { - 'id': 'DONTCARE', - 'status': None, - 'is_public': True, - 'name': 'TestImage %d' % (i), - 'properties': { - 'updated': None, 'created': None}, - } - self.assertDictMatch(meta, expected) - i = i + 1 - - -class ImageControllerWithGlanceServiceTest(test.TestCase): 
+class ImagesTest(test.TestCase): """ Test of the OpenStack API /images application controller w/Glance. """ @@ -323,7 +50,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def setUp(self): """Run before each test.""" - super(ImageControllerWithGlanceServiceTest, self).setUp() + super(ImagesTest, self).setUp() self.flags(image_service='nova.image.glance.GlanceImageService') self.stubs = stubout.StubOutForTesting() fakes.stub_out_networking(self.stubs) @@ -337,7 +64,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def tearDown(self): """Run after each test.""" self.stubs.UnsetAll() - super(ImageControllerWithGlanceServiceTest, self).tearDown() + super(ImagesTest, self).tearDown() def _get_fake_context(self): class Context(object): diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 5df25df37..cee17ba72 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -17,11 +17,15 @@ import datetime +import stubout import unittest +from nova.tests.api.openstack import fakes from nova import context -from nova import test +from nova import exception from nova.image import glance +from nova import test +from nova import utils class StubGlanceClient(object): @@ -91,10 +95,13 @@ class BaseGlanceTest(unittest.TestCase): class TestGlanceImageServiceProperties(BaseGlanceTest): + """ + Ensure attributes which aren't base attributes are ignored. Missing + attributes should be added as None + + """ + def test_show_passes_through_to_client(self): - """Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the - properties dict - """ fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': {'prop1': 'propvalue1'}}} @@ -102,7 +109,11 @@ class TestGlanceImageServiceProperties(BaseGlanceTest): image_meta = self.service.show(self.context, 'image1') expected = {'id': '1', 'name': 'image1', 'is_public': True, - 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + 'size': None, 'location': None, 'disk_format': None, + 'container_format': None, 'checksum': None, + 'created_at': None, 'updated_at': None, + 'deleted_at': None, 'deleted': None, 'status': None, + 'properties': {'prop1': 'propvalue1'}} self.assertEqual(image_meta, expected) def test_detail_passes_through_to_client(self): @@ -112,7 +123,11 @@ class TestGlanceImageServiceProperties(BaseGlanceTest): self.client.images = fixtures image_meta = self.service.detail(self.context) expected = [{'id': '1', 'name': 'image1', 'is_public': True, - 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}] + 'size': None, 'location': None, 'disk_format': None, + 'container_format': None, 'checksum': None, + 'created_at': None, 'updated_at': None, + 'deleted_at': None, 'deleted': None, 'status': None, + 'properties': {'prop1': 'propvalue1'}}] self.assertEqual(image_meta, expected) @@ -286,3 +301,294 @@ class TestGlanceSerializer(unittest.TestCase): converted = glance._convert_to_string(metadata) self.assertEqual(converted, converted_expected) self.assertEqual(glance._convert_from_string(converted), metadata) + + +class GlanceImageServiceTest(test.TestCase): + + """Tests the Glance image service, in particular that metadata translation + works properly. + + At a high level, the translations involved are: + + 1. Glance -> ImageService - This is needed so we can support + multiple ImageServices (Glance, Local, etc) + + 2. 
ImageService -> API - This is needed so we can support multiple + APIs (OpenStack, EC2) + """ + def __init__(self, *args, **kwargs): + super(GlanceImageServiceTest, self).__init__(*args, **kwargs) + self.service = None + self.context = None + + @staticmethod + def _make_fixture(name): + fixture = {'name': name, + 'properties': {'one': 'two'}, + 'status': None, + 'is_public': True} + return fixture + + def setUp(self): + super(GlanceImageServiceTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.stub_out_glance(self.stubs) + fakes.stub_out_compute_api_snapshot(self.stubs) + service_class = 'nova.image.glance.GlanceImageService' + self.service = utils.import_object(service_class) + self.context = context.RequestContext('fake', 'fake') + self.service.delete_all() + self.sent_to_glance = {} + fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance) + + def tearDown(self): + self.stubs.UnsetAll() + super(GlanceImageServiceTest, self).tearDown() + + def test_create_with_instance_id(self): + """Ensure instance_id is persisted as an image-property""" + fixture = {'name': 'test image', + 'is_public': False, + 'properties': {'instance_id': '42', 'user_id': 'fake'}} + + image_id = self.service.create(self.context, fixture)['id'] + expected = fixture + self.assertDictMatch(self.sent_to_glance['metadata'], expected) + + image_meta = self.service.show(self.context, image_id) + expected = {'id': image_id, 'name': 'test image', 'is_public': False, + 'size': None, 'location': None, 'disk_format': None, + 'container_format': None, 'checksum': None, + 'created_at': None, 'updated_at': None, + 'deleted_at': None, 'deleted': None, 'status': None, + 'properties': {'instance_id': '42', 'user_id': 'fake'}} + self.assertDictMatch(image_meta, expected) + + image_metas = self.service.detail(self.context) + self.assertDictMatch(image_metas[0], expected) + + def test_create_without_instance_id(self): + """ + Ensure we can create an image without having to specify an + instance_id. Public images are an example of an image not tied to an + instance. 
+ """ + fixture = {'name': 'test image'} + image_id = self.service.create(self.context, fixture)['id'] + + expected = {'name': 'test image'} + self.assertDictMatch(self.sent_to_glance['metadata'], expected) + + def test_create(self): + fixture = self._make_fixture('test image') + num_images = len(self.service.index(self.context)) + + image_id = self.service.create(self.context, fixture)['id'] + + self.assertNotEquals(None, image_id) + self.assertEquals(num_images + 1, + len(self.service.index(self.context))) + + def test_create_and_show_non_existing_image(self): + fixture = self._make_fixture('test image') + num_images = len(self.service.index(self.context)) + + image_id = self.service.create(self.context, fixture)['id'] + + self.assertNotEquals(None, image_id) + self.assertRaises(exception.NotFound, + self.service.show, + self.context, + 'bad image id') + + def test_create_and_show_non_existing_image_by_name(self): + fixture = self._make_fixture('test image') + num_images = len(self.service.index(self.context)) + + image_id = self.service.create(self.context, fixture)['id'] + + self.assertNotEquals(None, image_id) + self.assertRaises(exception.ImageNotFound, + self.service.show_by_name, + self.context, + 'bad image id') + + def test_index(self): + fixture = self._make_fixture('test image') + image_id = self.service.create(self.context, fixture)['id'] + image_metas = self.service.index(self.context) + expected = [{'id': 'DONTCARE', 'name': 'test image'}] + self.assertDictListMatch(image_metas, expected) + + def test_index_default_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context) + i = 0 + for meta in image_metas: + expected = {'id': 'DONTCARE', + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_index_marker(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context, marker=ids[1]) + self.assertEquals(len(image_metas), 8) + i = 2 + for meta in image_metas: + expected = {'id': 'DONTCARE', + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_index_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context, limit=3) + self.assertEquals(len(image_metas), 3) + + def test_index_marker_and_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.index(self.context, marker=ids[3], limit=1) + self.assertEquals(len(image_metas), 1) + i = 4 + for meta in image_metas: + expected = {'id': 'DONTCARE', + 'name': 'TestImage %d' % (i)} + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_detail_marker(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, 
marker=ids[1]) + self.assertEquals(len(image_metas), 8) + i = 2 + for meta in image_metas: + expected = { + 'id': 'DONTCARE', + 'status': None, + 'is_public': True, + 'name': 'TestImage %d' % (i), + 'properties': {'one': 'two'}, + 'size': None, + 'location': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': None, + 'updated_at': None, + 'deleted_at': None, + 'deleted': None + } + + print meta + print expected + self.assertDictMatch(meta, expected) + i = i + 1 + + def test_detail_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, limit=3) + self.assertEquals(len(image_metas), 3) + + def test_detail_marker_and_limit(self): + fixtures = [] + ids = [] + for i in range(10): + fixture = self._make_fixture('TestImage %d' % (i)) + fixtures.append(fixture) + ids.append(self.service.create(self.context, fixture)['id']) + + image_metas = self.service.detail(self.context, marker=ids[3], limit=3) + self.assertEquals(len(image_metas), 3) + i = 4 + for meta in image_metas: + expected = { + 'id': 'DONTCARE', + 'status': None, + 'is_public': True, + 'name': 'TestImage %d' % (i), + 'properties': {'one': 'two'}, + 'size': None, + 'location': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': None, + 'updated_at': None, + 'deleted_at': None, + 'deleted': None + } + self.assertDictMatch(meta, expected) + i = i + 1 + + + def test_update(self): + fixture = self._make_fixture('test image') + image_id = self.service.create(self.context, fixture)['id'] + fixture['status'] = 'in progress' + + self.service.update(self.context, image_id, fixture) + + new_image_data = self.service.show(self.context, image_id) + self.assertEquals('in progress', new_image_data['status']) + + def test_delete(self): + fixture1 = self._make_fixture('test image 1') + fixture2 = self._make_fixture('test image 2') + fixtures = [fixture1, fixture2] + + num_images = len(self.service.index(self.context)) + self.assertEquals(0, num_images, str(self.service.index(self.context))) + + ids = [] + for fixture in fixtures: + new_id = self.service.create(self.context, fixture)['id'] + ids.append(new_id) + + num_images = len(self.service.index(self.context)) + self.assertEquals(2, num_images, str(self.service.index(self.context))) + + self.service.delete(self.context, ids[0]) + + num_images = len(self.service.index(self.context)) + self.assertEquals(1, num_images) + + + + -- cgit From 62e2ad123685ab01accfb9988013225fe377dfbe Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 31 Aug 2011 23:07:07 -0400 Subject: pep8 --- nova/image/glance.py | 1 - nova/tests/image/test_glance.py | 5 ----- 2 files changed, 6 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index 2e0ed555d..915e1fd3b 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -358,7 +358,6 @@ def _convert_to_string(metadata): return _convert(_json_dumps, metadata) - def _limit_attributes(image_meta): IMAGE_ATTRIBUTES = ['size', 'location', 'disk_format', 'container_format', 'checksum', 'id', diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index cee17ba72..3922036a9 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -557,7 +557,6 @@ class GlanceImageServiceTest(test.TestCase): self.assertDictMatch(meta, expected) i 
= i + 1 - def test_update(self): fixture = self._make_fixture('test image') image_id = self.service.create(self.context, fixture)['id'] @@ -588,7 +587,3 @@ class GlanceImageServiceTest(test.TestCase): num_images = len(self.service.index(self.context)) self.assertEquals(1, num_images) - - - - -- cgit From 69aeb326d6da85a3b0566d973588eab6668ffa36 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 31 Aug 2011 23:13:21 -0400 Subject: remove BaseImageService --- nova/api/ec2/cloud.py | 2 +- nova/image/fake.py | 3 +- nova/image/s3.py | 2 +- nova/image/service.py | 200 -------------------------------------------------- 4 files changed, 3 insertions(+), 204 deletions(-) delete mode 100644 nova/image/service.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index fe44191c8..80fa2c496 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1472,7 +1472,7 @@ class CloudController(object): return image def _format_image(self, image): - """Convert from format defined by BaseImageService to S3 format.""" + """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = self._image_type(image.get('container_format')) ec2_id = self.image_ec2_id(image.get('id'), image_type) diff --git a/nova/image/fake.py b/nova/image/fake.py index 97af81711..4eceabc11 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -24,7 +24,6 @@ import random from nova import exception from nova import flags from nova import log as logging -from nova.image import service LOG = logging.getLogger('nova.image.fake') @@ -33,7 +32,7 @@ LOG = logging.getLogger('nova.image.fake') FLAGS = flags.FLAGS -class _FakeImageService(service.BaseImageService): +class _FakeImageService(object): """Mock (fake) image service for unit testing.""" def __init__(self): diff --git a/nova/image/s3.py b/nova/image/s3.py index abf01a942..1e74dd433 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -48,7 +48,7 @@ flags.DEFINE_string('s3_secret_key', 'notchecked', 'secret key to use for s3 server for images') -class S3ImageService(service.BaseImageService): +class S3ImageService(object): """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): diff --git a/nova/image/service.py b/nova/image/service.py deleted file mode 100644 index 5361cfc89..000000000 --- a/nova/image/service.py +++ /dev/null @@ -1,200 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from nova import utils - - -class BaseImageService(object): - """Base class for providing image search and retrieval services. - - ImageService exposes two concepts of metadata: - - 1. First-class attributes: This is metadata that is common to all - ImageService subclasses and is shared across all hypervisors. These - attributes are defined by IMAGE_ATTRS. - - 2. Properties: This is metdata that is specific to an ImageService, - and Image, or a particular hypervisor. 
Any attribute not present in - BASE_IMAGE_ATTRS should be considered an image property. - - This means that ImageServices will return BASE_IMAGE_ATTRS as keys in the - metadata dict, all other attributes will be returned as keys in the nested - 'properties' dict. - - """ - - BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at', - 'deleted_at', 'deleted', 'status', 'is_public'] - - # NOTE(sirp): ImageService subclasses may override this to aid translation - # between BaseImageService attributes and additional metadata stored by - # the ImageService subclass - SERVICE_IMAGE_ATTRS = [] - - def index(self, context, *args, **kwargs): - """List images. - - :returns: a sequence of mappings with the following signature - {'id': opaque id of image, 'name': name of image} - - """ - raise NotImplementedError - - def detail(self, context, *args, **kwargs): - """Detailed information about an images. - - :returns: a sequence of mappings with the following signature - {'id': opaque id of image, - 'name': name of image, - 'created_at': creation datetime object, - 'updated_at': modification datetime object, - 'deleted_at': deletion datetime object or None, - 'deleted': boolean indicating if image has been deleted, - 'status': string description of image status, - 'is_public': boolean indicating if image is public - } - - If the service does not implement a method that provides a detailed - set of information about images, then the method should raise - NotImplementedError, in which case Nova will emulate this method - with repeated calls to show() for each image received from the - index() method. - - """ - raise NotImplementedError - - def show(self, context, image_id): - """Detailed information about an image. - - :returns: a mapping with the following signature: - {'id': opaque id of image, - 'name': name of image, - 'created_at': creation datetime object, - 'updated_at': modification datetime object, - 'deleted_at': deletion datetime object or None, - 'deleted': boolean indicating if image has been deleted, - 'status': string description of image status, - 'is_public': boolean indicating if image is public - }, ... - - :raises: NotFound if the image does not exist - - """ - raise NotImplementedError - - def get(self, context, data): - """Get an image. - - :param data: a file-like object to hold binary image data - :returns: a dict containing image metadata, writes image data to data. - :raises: NotFound if the image does not exist - - """ - raise NotImplementedError - - def create(self, context, metadata, data=None): - """Store the image metadata and data. - - :returns: the new image metadata. - :raises: AlreadyExists if the image already exist. - - """ - raise NotImplementedError - - def update(self, context, image_id, metadata, data=None): - """Update the given image metadata and data and return the metadata. - - :raises: NotFound if the image does not exist. - - """ - raise NotImplementedError - - def delete(self, context, image_id): - """Delete the given image. - - :raises: NotFound if the image does not exist. - - """ - raise NotImplementedError - - @staticmethod - def _is_image_available(context, image_meta): - """Check image availability. - - Images are always available if they are public or if the user is an - admin. - - Otherwise, we filter by project_id (if present) and then fall-back to - images owned by user. - - """ - # FIXME(sirp): We should be filtering by user_id on the Glance side - # for security; however, we can't do that until we get authn/authz - # sorted out. 
Until then, filtering in Nova. - if image_meta['is_public'] or context.is_admin: - return True - - properties = image_meta['properties'] - - if context.project_id and ('project_id' in properties): - return str(properties['project_id']) == str(context.project_id) - - try: - user_id = properties['user_id'] - except KeyError: - return False - - return str(user_id) == str(context.user_id) - - @classmethod - def _translate_to_base(cls, metadata): - """Return a metadata dictionary that is BaseImageService compliant. - - This is used by subclasses to expose only a metadata dictionary that - is the same across ImageService implementations. - - """ - return cls._propertify_metadata(metadata, cls.BASE_IMAGE_ATTRS) - - @classmethod - def _translate_to_service(cls, metadata): - """Return a metadata dict that is usable by the ImageService subclass. - - As an example, Glance has additional attributes (like 'location'); the - BaseImageService considers these properties, but we need to translate - these back to first-class attrs for sending to Glance. This method - handles this by allowing you to specify the attributes an ImageService - considers first-class. - - """ - if not cls.SERVICE_IMAGE_ATTRS: - raise NotImplementedError(_('Cannot use this without specifying ' - 'SERVICE_IMAGE_ATTRS for subclass')) - return cls._propertify_metadata(metadata, cls.SERVICE_IMAGE_ATTRS) - - @staticmethod - def _propertify_metadata(metadata, keys): - """Move unknown keys to a nested 'properties' dict. - - :returns: a new dict with the keys moved. - - """ - flattened = utils.flatten_dict(metadata) - attributes, properties = utils.partition_dict(flattened, keys) - attributes['properties'] = properties - return attributes -- cgit From b63b6d896917af60fcc1b76b22cb912faab3c1c3 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 31 Aug 2011 23:26:18 -0400 Subject: removing unnecessary imports --- nova/tests/api/openstack/test_images.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index b9bd18eda..942bab5c7 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,20 +22,14 @@ and as a WSGI layer import copy import json -import os -import shutil -import tempfile import xml.dom.minidom as minidom import mox import stubout import webob -from glance import client as glance_client from nova import context -from nova import exception from nova import test -from nova import utils import nova.api.openstack from nova.api.openstack import images from nova.tests.api.openstack import fakes -- cgit From 7be0e4cd371691253aa332c2c180be1a83f7f98b Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Thu, 1 Sep 2011 05:02:30 +0000 Subject: Launchpad automatic translations update. 
--- po/pt_BR.po | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/po/pt_BR.po b/po/pt_BR.po index bff1135c6..e10b0c0de 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-08-23 11:23+0000\n" -"Last-Translator: msinhore \n" +"PO-Revision-Date: 2011-09-01 04:15+0000\n" +"Last-Translator: Daniel Negri \n" "Language-Team: Brazilian Portuguese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-08-24 04:47+0000\n" -"X-Generator: Launchpad (build 13697)\n" +"X-Launchpad-Export-Date: 2011-09-01 05:02+0000\n" +"X-Generator: Launchpad (build 13827)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -708,7 +708,7 @@ msgstr "Ligação %(queue)s para %(exchange)s com chave %(routing_key)s" #: ../nova/fakerabbit.py:121 #, python-format msgid "Getting from %(queue)s: %(message)s" -msgstr "" +msgstr "Recebendo de %(queue)s: %(message)s" #: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format @@ -808,7 +808,7 @@ msgstr "Kernel/Ramdisk %s destruidos" #: ../nova/virt/xenapi/vm_utils.py:361 #, python-format msgid "Asking xapi to fetch %(url)s as %(access)s" -msgstr "" +msgstr "Requisitando à xapi a busca da url %(url)s como %(access)s" #: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -- cgit From 3ae639e5acd965999138e307508933ae1624b476 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 31 Aug 2011 23:12:09 -0700 Subject: add specific exceptions for quantum client. Fix doc-strings in client.py --- nova/network/quantum/client.py | 93 ++++++++++++++---------------- nova/network/quantum/quantum_connection.py | 7 +-- 2 files changed, 46 insertions(+), 54 deletions(-) diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index 82f23b6b5..22a4b4d63 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -39,6 +39,24 @@ class JSONSerializer(object): return json.loads(data) +# FIXME: (danwent) the full client lib will expose more +# granular exceptions, for now, just try to distinguish +# between the cases we care about. +class QuantumNotFoundException(Exception); + """ Indicates that Quantum Server returned 404""" + pass + + +class QuantumServerException(Exception): + """ Indicates any non-404 error from Quantum Server""" + pass + + +class QuantumIOException(Exception): + """ Indicates network IO trouble reaching Quantum Server""" + pass + + class api_call(object): """A Decorator to add support for format and tenant overriding""" def __init__(self, func): @@ -78,8 +96,7 @@ class Client(object): def __init__(self, host="127.0.0.1", port=9696, use_ssl=False, tenant=None, format="xml", testing_stub=None, key_file=None, cert_file=None, logger=None): - """ - Creates a new client to some service. + """ Creates a new client to some service. 
:param host: The host where service resides :param port: The port where service resides @@ -102,9 +119,7 @@ class Client(object): self.logger = logger def get_connection_type(self): - """ - Returns the proper connection type - """ + """ Returns the proper connection type """ if self.testing_stub: return self.testing_stub elif self.use_ssl: @@ -114,8 +129,7 @@ class Client(object): def do_request(self, method, action, body=None, headers=None, params=None): - """ - Connects to the server and issues a request. + """ Connects to the server and issues a request. Returns the result data, or raises an appropriate exception if HTTP status code is not 2xx @@ -168,6 +182,10 @@ class Client(object): self.logger.debug("Quantum Client Reply (code = %s) :\n %s" \ % (str(status_code), data)) + if status_code == httplib.NOT_FOUND: + raise QuantumNotFoundException( + _("Quantum entity not found: %s" % data)) + if status_code in (httplib.OK, httplib.CREATED, httplib.ACCEPTED, @@ -175,15 +193,16 @@ class Client(object): if data is not None and len(data): return self.deserialize(data, status_code) else: - raise Exception(_("Server returned error: %s" % res.read())) + raise QuantumServerException( + _("Server %(status_code)s error: %(data)s" + % locals())) except (socket.error, IOError), e: - raise Exception(_("Unable to connect to " + raise QuantumIOException(_("Unable to connect to " "server. Got error: %s" % e)) def get_status_code(self, response): - """ - Returns the integer status code from the response, which + """ Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): @@ -212,99 +231,73 @@ class Client(object): @api_call def list_networks(self): - """ - Fetches a list of all networks for a tenant - """ + """ Fetches a list of all networks for a tenant """ return self.do_request("GET", self.networks_path) @api_call def show_network_details(self, network): - """ - Fetches the details of a certain network - """ + """ Fetches the details of a certain network """ return self.do_request("GET", self.network_path % (network)) @api_call def create_network(self, body=None): - """ - Creates a new network - """ + """ Creates a new network """ body = self.serialize(body) return self.do_request("POST", self.networks_path, body=body) @api_call def update_network(self, network, body=None): - """ - Updates a network - """ + """ Updates a network """ body = self.serialize(body) return self.do_request("PUT", self.network_path % (network), body=body) @api_call def delete_network(self, network): - """ - Deletes the specified network - """ + """ Deletes the specified network """ return self.do_request("DELETE", self.network_path % (network)) @api_call def list_ports(self, network): - """ - Fetches a list of ports on a given network - """ + """ Fetches a list of ports on a given network """ return self.do_request("GET", self.ports_path % (network)) @api_call def show_port_details(self, network, port): - """ - Fetches the details of a certain port - """ + """ Fetches the details of a certain port """ return self.do_request("GET", self.port_path % (network, port)) @api_call def create_port(self, network, body=None): - """ - Creates a new port on a given network - """ + """ Creates a new port on a given network """ body = self.serialize(body) return self.do_request("POST", self.ports_path % (network), body=body) @api_call def delete_port(self, network, port): - """ - Deletes the specified port from a network - """ + """ 
Deletes the specified port from a network """ return self.do_request("DELETE", self.port_path % (network, port)) @api_call def set_port_state(self, network, port, body=None): - """ - Sets the state of the specified port - """ + """ Sets the state of the specified port """ body = self.serialize(body) return self.do_request("PUT", self.port_path % (network, port), body=body) @api_call def show_port_attachment(self, network, port): - """ - Fetches the attachment-id associated with the specified port - """ + """ Fetches the attachment-id associated with the specified port """ return self.do_request("GET", self.attachment_path % (network, port)) @api_call def attach_resource(self, network, port, body=None): - """ - Sets the attachment-id of the specified port - """ + """ Sets the attachment-id of the specified port """ body = self.serialize(body) return self.do_request("PUT", self.attachment_path % (network, port), body=body) @api_call def detach_resource(self, network, port): - """ - Removes the attachment-id of the specified port - """ + """ Removes the attachment-id of the specified port """ return self.do_request("DELETE", self.attachment_path % (network, port)) diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index e2218c68d..90d5e8e04 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -70,11 +70,10 @@ class QuantumClientConnection(object): """ try: self.client.show_network_details(net_id, tenant=tenant_id) - except: - # FIXME(danwent): client lib should expose granular exceptions - # so we can confirm we're getting a 404 and not some other error + return True + except client.QuantumNotFoundException: + # Not really an error. Real errors will be propogated to caller return False - return True def create_and_attach_port(self, tenant_id, net_id, interface_id): """ Creates a Quantum port on the specified network, sets -- cgit From 02093fbe185e52a3c22b748811e60e988150a352 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 31 Aug 2011 23:37:26 -0700 Subject: more review cleanup --- nova/network/quantum/manager.py | 16 ++++++++-------- nova/network/quantum/nova_ipam_lib.py | 8 +++----- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 1d2c7c664..153f6c0f2 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -103,16 +103,16 @@ class QuantumManager(manager.FlatManager): priority, cidr, gateway_v6, cidr_v6, dns1, dns2) def delete_network(self, context, fixed_range): - """ Lookup network by IPv4 cidr, delete both the IPAM - subnet and the corresponding Quantum network. - """ - project_id = context.project_id - quantum_net_id = self.ipam.get_network_id_by_cidr( + """ Lookup network by IPv4 cidr, delete both the IPAM + subnet and the corresponding Quantum network. + """ + project_id = context.project_id + quantum_net_id = self.ipam.get_network_id_by_cidr( context, fixed_range, project_id) - self.ipam.delete_subnets_by_net_id(context, quantum_net_id, + self.ipam.delete_subnets_by_net_id(context, quantum_net_id, project_id) - q_tenant_id = project_id or FLAGS.quantum_default_tenant_id - self.q_conn.delete_network(q_tenant_id, quantum_net_id) + q_tenant_id = project_id or FLAGS.quantum_default_tenant_id + self.q_conn.delete_network(q_tenant_id, quantum_net_id) def allocate_for_instance(self, context, **kwargs): """ Called by compute when it is creating a new VM. 
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index 4f62887d1..4b08b906a 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -import math +import netaddr from nova import db from nova import exception @@ -60,8 +60,7 @@ class QuantumNovaIPAMLib(object): are needed by Quantum but not the FlatManager. """ admin_context = context.elevated() - # FIXME(danwent): Use the netaddr library here - subnet_size = int(math.pow(2, (32 - int(cidr.split("/")[1])))) + subnet_size = len(netaddr.IPNetwork(cidr)) networks = manager.FlatManager.create_networks(self.net_manager, admin_context, label, cidr, False, 1, subnet_size, cidr_v6, @@ -114,8 +113,7 @@ class QuantumNovaIPAMLib(object): return sorted(net_list, key=lambda x: id_priority_map[x[0]]) def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): - """ Allocates a single fixed IPv4 address for a virtual interface. - """ + """ Allocates a single fixed IPv4 address for a virtual interface.""" admin_context = context.elevated() network = db.network_get_by_uuid(admin_context, quantum_net_id) if network['cidr']: -- cgit From 76a60bf27cc8864e397139a3497b1f571ce38d88 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 31 Aug 2011 23:42:24 -0700 Subject: typo --- nova/network/quantum/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index 22a4b4d63..67582d8bc 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -42,7 +42,7 @@ class JSONSerializer(object): # FIXME: (danwent) the full client lib will expose more # granular exceptions, for now, just try to distinguish # between the cases we care about. -class QuantumNotFoundException(Exception); +class QuantumNotFoundException(Exception): """ Indicates that Quantum Server returned 404""" pass -- cgit From 26e581ac03336182a44f2a411f28da36beef0f89 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 1 Sep 2011 11:40:44 -0400 Subject: docstring cleanup --- nova/tests/image/test_glance.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 3922036a9..4b7e04d7c 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -96,8 +96,9 @@ class BaseGlanceTest(unittest.TestCase): class TestGlanceImageServiceProperties(BaseGlanceTest): """ - Ensure attributes which aren't base attributes are ignored. Missing - attributes should be added as None + Ensure attributes which aren't base attributes are ignored. + + Missingattributes should be added as None """ @@ -304,9 +305,8 @@ class TestGlanceSerializer(unittest.TestCase): class GlanceImageServiceTest(test.TestCase): - - """Tests the Glance image service, in particular that metadata translation - works properly. + """ + Tests the Glance image service. At a high level, the translations involved are: @@ -315,6 +315,7 @@ class GlanceImageServiceTest(test.TestCase): 2. 
ImageService -> API - This is needed so we can support multple APIs (OpenStack, EC2) + """ def __init__(self, *args, **kwargs): super(GlanceImageServiceTest, self).__init__(*args, **kwargs) -- cgit From 5b3b3d1b357c085c2088df7d76df8392118fb82e Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 1 Sep 2011 09:39:36 -0700 Subject: additional review cleanup --- .mailmap | 2 +- nova/network/quantum/client.py | 12 ++++++++---- nova/network/quantum/fake.py | 10 ++++++---- nova/network/quantum/manager.py | 2 +- nova/network/quantum/melange_connection.py | 4 +++- nova/network/quantum/melange_ipam_lib.py | 3 ++- nova/network/quantum/nova_ipam_lib.py | 2 +- nova/network/quantum/quantum_connection.py | 8 ++++---- nova/tests/test_quantum.py | 21 +++++++++++---------- 9 files changed, 37 insertions(+), 27 deletions(-) diff --git a/.mailmap b/.mailmap index a46dd6673..f2f59d81b 100644 --- a/.mailmap +++ b/.mailmap @@ -37,7 +37,7 @@ - + diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index 67582d8bc..455bb8a79 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -22,11 +22,15 @@ import socket import urllib +#FIXME(danwent): All content in this file should be removed once the +# packaging work for the quantum client libraries is complete. +# At that point, we will be able to just install the libraries as a +# dependency and import from quantum.client.* and quantum.common.* +# Until then, we have simplified versions of these classes in this file. + class JSONSerializer(object): - """ - This is a simple json-only serializer to use until we can just grab + """ This is a simple json-only serializer to use until we can just grab the standard serializer from the quantum library. - TODO(danwent): replace serializer with quantum implementation """ def serialize(self, data, content_type): try: @@ -39,7 +43,7 @@ class JSONSerializer(object): return json.loads(data) -# FIXME: (danwent) the full client lib will expose more +# The full client lib will expose more # granular exceptions, for now, just try to distinguish # between the cases we care about. 
class QuantumNotFoundException(Exception): diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py index f668edfed..6a4005c59 100644 --- a/nova/network/quantum/fake.py +++ b/nova/network/quantum/fake.py @@ -65,8 +65,9 @@ class FakeQuantumClientConnection(object): def create_and_attach_port(self, tenant_id, net_id, interface_id): if not self.network_exists(tenant_id, net_id): - raise Exception(_("network %s does not exist for tenant %s" % - (net_id, tenant_id))) + raise Exception( + _("network %(net_id)s does not exist for tenant %(tenant_id)" + % locals())) self._confirm_not_attached(interface_id) uuid = str(utils.gen_uuid()) @@ -76,8 +77,9 @@ class FakeQuantumClientConnection(object): def detach_and_delete_port(self, tenant_id, net_id, port_id): if not self.network_exists(tenant_id, net_id): - raise Exception(_("network %s does not exist for tenant %s" %\ - (net_id, tenant_id))) + raise exception.NotFound( + _("network %s does not exist for tenant %s" % + (net_id, tenant_id))) del self.nets[net_id]['ports'][port_id] def get_port_by_attachment(self, tenant_id, attachment_id): diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 153f6c0f2..709299dd2 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -86,7 +86,7 @@ class QuantumManager(manager.FlatManager): if num_networks != 1: raise Exception(_("QuantumManager requires that only one" " network is created per call")) - q_tenant_id = kwargs["project_id"] or FLAGS.quantum_default_tenant_id + q_tenant_id = kwargs.get("project_id", FLAGS.quantum_default_tenant_id) quantum_net_id = uuid if quantum_net_id: if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py index 1ee0c29a2..4dc35d15d 100644 --- a/nova/network/quantum/melange_connection.py +++ b/nova/network/quantum/melange_connection.py @@ -35,7 +35,9 @@ flags.DEFINE_string('melange_port', json_content_type = {'Content-type': "application/json"} - +#FIXME(danwent): talk to the Melange folks about creating a +# client lib that we can import as a library, instead of +# have to have all of the client code in here. 
class MelangeConnection(object): def __init__(self, host=None, port=None, use_ssl=False): diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index 24a7c5404..71c0d7ce6 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -17,6 +17,7 @@ from netaddr import IPNetwork +from nova import exception from nova import flags from nova import log as logging from nova.network.quantum import melange_connection @@ -85,7 +86,7 @@ class QuantumMelangeIPAMLib(object): for b in all_blocks['ip_blocks']: if b['cidr'] == cidr: return b['network_id'] - raise Exception(_("No network found for cidr %s" % cidr)) + raise exception.NotFound(_("No network found for cidr %s" % cidr)) def delete_subnets_by_net_id(self, context, net_id, project_id): """ Find Melange block associated with the Quantum UUID, diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index 4b08b906a..17236a976 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -154,7 +154,7 @@ class QuantumNovaIPAMLib(object): vif_rec = db.virtual_interface_get_by_uuid(context, vif_id) fixed_ips = db.fixed_ip_get_by_virtual_interface(context, vif_rec['id']) - return [f['address'] for f in fixed_ips] + return [fixed_ip['address'] for fixed_ip in fixed_ips] def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): """ Returns a list containing a single IPv6 address strings diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index 90d5e8e04..93892a843 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -80,8 +80,8 @@ class QuantumClientConnection(object): status to ACTIVE to enable traffic, and attaches the vNIC with the specified interface-id. """ - LOG.debug(_("Connecting interface %s to net %s for %s" % - (interface_id, net_id, tenant_id))) + LOG.debug(_("Connecting interface %(interface_id)s to " + "net %(net_id)s for %(tenant_id)s" % locals())) port_data = {'port': {'state': 'ACTIVE'}} resdict = self.client.create_port(net_id, port_data, tenant=tenant_id) port_id = resdict["port"]["id"] @@ -92,8 +92,8 @@ class QuantumClientConnection(object): def detach_and_delete_port(self, tenant_id, net_id, port_id): """ Detach and delete the specified Quantum port. 
""" - LOG.debug("Deleting port %s on net %s for %s" % \ - (port_id, net_id, tenant_id)) + LOG.debug(_("Deleting port %(port_id)s on net %(net_id)s" + " for %(tenant_id)s" % locals())) self.client.detach_resource(net_id, port_id, tenant=tenant_id) self.client.delete_port(net_id, port_id, tenant=tenant_id) diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 2cc83adf1..0fa4184b1 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -189,29 +189,29 @@ class QuantumTestCaseBase(object): # we don't know which order the NICs will be in until we # introduce the notion of priority # v4 cidr - self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or \ + self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or nw_info[1][0]['cidr'].startswith("9.")) - self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or \ + self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or nw_info[1][0]['cidr'].startswith("192.")) # v4 address - self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or \ + self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or nw_info[1][1]['ips'][0]['ip'].startswith("9.")) - self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or \ + self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or nw_info[1][1]['ips'][0]['ip'].startswith("192.")) # v6 cidr - self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or \ + self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:")) - self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or \ + self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or nw_info[1][0]['cidr_v6'].startswith("2001:1db9:")) # v6 address self.assertTrue(\ - nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or \ + nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:")) self.assertTrue(\ - nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or \ + nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:")) self.net_man.deallocate_for_instance(ctx, @@ -240,9 +240,10 @@ class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase): for n in db.network_get_all(ctx): db.network_delete_safe(ctx, n['id']) - # NOTE(danwent): I've found that other unit tests have a nasty + # Other unit tests (e.g., test_compute.py) have a nasty # habit of of creating fixed IPs and not cleaning up, which - # can confuse these tests, so we clean them all. + # can confuse these tests, so we remove all existing fixed + # ips before starting. 
session = get_session() result = session.query(models.FixedIp).all() with session.begin(): -- cgit From 35c4cecc8d29da32bd816bb68f8b45c2d03f892f Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 1 Sep 2011 09:42:44 -0700 Subject: undo change in setting q_tenant_id in quantum_manager.create_network --- nova/network/quantum/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 709299dd2..153f6c0f2 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -86,7 +86,7 @@ class QuantumManager(manager.FlatManager): if num_networks != 1: raise Exception(_("QuantumManager requires that only one" " network is created per call")) - q_tenant_id = kwargs.get("project_id", FLAGS.quantum_default_tenant_id) + q_tenant_id = kwargs["project_id"] or FLAGS.quantum_default_tenant_id quantum_net_id = uuid if quantum_net_id: if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): -- cgit From a36e0a1cbc5645ab47041c2627dba80b39b23cc2 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 1 Sep 2011 15:42:01 -0400 Subject: Fixes for minor network manager issues centered around deleting/accessing instances which don't have network information set. --- nova/api/openstack/views/addresses.py | 1 - nova/network/manager.py | 5 ++++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index 8f07a2289..8d38bc9c3 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -88,7 +88,6 @@ class ViewBuilderV11(ViewBuilder): try: return interface['network']['label'] except (TypeError, KeyError) as exc: - LOG.exception(exc) raise TypeError def _extract_ipv4_addresses(self, interface): diff --git a/nova/network/manager.py b/nova/network/manager.py index e6b30d1a0..d7b0abdda 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -448,7 +448,7 @@ class NetworkManager(manager.SchedulerDependentManager): try: fixed_ips = kwargs.get('fixed_ips') or \ self.db.fixed_ip_get_by_instance(context, instance_id) - except exceptions.FixedIpNotFoundForInstance: + except exception.FixedIpNotFoundForInstance: fixed_ips = [] LOG.debug(_("network deallocation for instance |%s|"), instance_id, context=context) @@ -484,6 +484,9 @@ class NetworkManager(manager.SchedulerDependentManager): for vif in vifs: network = vif['network'] + if network is None: + continue + # determine which of the instance's IPs belong to this network network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if fixed_ip['network_id'] == network['id']] -- cgit -- cgit From 833ac1674d33cde3721b2d10a3d9545cc8320b37 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 1 Sep 2011 16:09:23 -0400 Subject: Added test for NULL network. 
--- nova/network/manager.py | 4 ++-- nova/tests/test_network.py | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index d7b0abdda..e8c5c0654 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -484,8 +484,8 @@ class NetworkManager(manager.SchedulerDependentManager): for vif in vifs: network = vif['network'] - if network is None: - continue + #if network is None: + # continue # determine which of the instance's IPs belong to this network network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 25ff940f0..2ae5a35e3 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -118,9 +118,14 @@ vifs = [{'id': 0, {'id': 1, 'address': 'DE:AD:BE:EF:00:01', 'uuid': '00000000-0000-0000-0000-0000000000000001', - 'network_id': 0, 'network_id': 1, 'network': FakeModel(**networks[1]), + 'instance_id': 0}, + {'id': 2, + 'address': 'DE:AD:BE:EF:00:02', + 'uuid': '00000000-0000-0000-0000-0000000000000002', + 'network_id': 2, + 'network': None, 'instance_id': 0}] -- cgit From aef85a2596e943299542f05e165774250476bc5b Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 1 Sep 2011 16:11:14 -0400 Subject: Probably shouldn't leave that commented out. --- nova/network/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index e8c5c0654..d7b0abdda 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -484,8 +484,8 @@ class NetworkManager(manager.SchedulerDependentManager): for vif in vifs: network = vif['network'] - #if network is None: - # continue + if network is None: + continue # determine which of the instance's IPs belong to this network network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if -- cgit From ab8cfda10ed12dd592ef3b06806a80b1922707c1 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 1 Sep 2011 15:29:00 -0500 Subject: moved cidr_v6 back --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 8fd176c26..b4605eea5 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -499,6 +499,7 @@ class NetworkManager(manager.SchedulerDependentManager): 'bridge': network['bridge'], 'id': network['id'], 'cidr': network['cidr'], + 'cidr_v6': network['cidr_v6'], 'injected': network['injected'], 'vlan': network['vlan'], 'bridge_interface': network['bridge_interface'], @@ -524,7 +525,6 @@ class NetworkManager(manager.SchedulerDependentManager): if network['cidr_v6']: info['ip6s'] = [ip6_dict()] - network_dict['cidr_v6'] = network['cidr_v6'] # TODO(tr3buchet): handle ip6 routes here as well if network['gateway_v6']: info['gateway6'] = network['gateway_v6'] -- cgit From 04cfb6356b920941080fbc58301b6d005d21ac5f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 1 Sep 2011 15:50:38 -0500 Subject: renamed fake_network_info.py --- nova/tests/fake_network.py | 144 ++++++++++++++++++++++++++++++++++++++++ nova/tests/fake_network_info.py | 144 ---------------------------------------- nova/tests/test_libvirt.py | 6 +- nova/tests/test_network.py | 4 +- 4 files changed, 149 insertions(+), 149 deletions(-) create mode 100644 nova/tests/fake_network.py delete mode 100644 nova/tests/fake_network_info.py diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py new file mode 100644 index 000000000..2e8cf60ec --- 
/dev/null +++ b/nova/tests/fake_network.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Rackspace +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import db +from nova import flags +from nova import test +from nova.network import manager as network_manager + + +HOST = "testhost" +FLAGS = flags.FLAGS + + +class FakeModel(dict): + """Represent a model from the db""" + def __init__(self, *args, **kwargs): + self.update(kwargs) + + def __getattr__(self, name): + return self[name] + + +def fake_network(n, ipv6=None): + if ipv6 == None: + ipv6 = FLAGS.use_ipv6 + rval = {'id': n, + 'label': 'test%d' % n, + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.%d.0/24' % n, + 'cidr_v6': None, + 'netmask': '255.255.255.0', + 'netmask_v6': None, + 'bridge': 'fake_br%d' % n, + 'bridge_interface': 'fake_eth%d' % n, + 'gateway': '192.168.%d.1' % n, + 'gateway_v6': None, + 'broadcast': '192.168.%d.255' % n, + 'dns1': '192.168.%d.3' % n, + 'dns2': '192.168.%d.4' % n, + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.%d.2' % n} + if ipv6: + rval['cidr_v6'] = '2001:db8:0:%x::/64' % n + rval['gateway_v6'] = '2001:db8:0:%x::1' % n + rval['netmask_v6'] = '64' + + return rval + + +def fixed_ips(num_networks, num_ips, num_floating_ips=0): + for network in xrange(num_networks): + for ip in xrange(num_ips): + id = network * num_ips + ip + f_ips = [floating_ips(id).next() for i in xrange(num_floating_ips)] + yield {'id': id, + 'network_id': network, + 'address': '192.168.%d.1%02d' % (network, ip), + 'instance_id': 0, + 'allocated': False, + # and since network_id and vif_id happen to be equivalent + 'virtual_interface_id': network, + 'floating_ips': [FakeModel(**ip) for ip in f_ips]} + + +flavor = {'id': 0, + 'name': 'fake_flavor', + 'memory_mb': 2048, + 'vcpus': 2, + 'local_gb': 10, + 'flavor_id': 0, + 'swap': 0, + 'rxtx_quota': 0, + 'rxtx_cap': 3} + + +def floating_ips(fixed_ip_id): + for i in xrange(154): + yield {'id': 0, + 'address': '10.10.10.%d' % (i + 100), + 'fixed_ip_id': fixed_ip_id, + 'project_id': None, + 'auto_assigned': False} + + +def vifs(n): + for x in xrange(n): + yield {'id': x, + 'address': 'DE:AD:BE:EF:00:%02x' % x, + 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x, + 'network_id': x, + 'network': FakeModel(**fake_network(x)), + 'instance_id': 0} + + +def ipv4_like(ip, s): + ip = ip.split('.') + s = s.split('.') + + for i, octet in enumerate(s): + if octet == '*': + continue + if octet != ip[i]: + return False + return True + + +def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): + # stubs is the self.stubs from the test + # ips_per_vif is the number of ips each vif will have + # num_floating_ips is number of float ips for each fixed ip + network = network_manager.FlatManager(host=HOST) + network.db = db + + def fixed_ips_fake(*args, **kwargs): + return list(fixed_ips(n, ips_per_vif)) + + def virtual_interfaces_fake(*args, **kwargs): + return [vif for vif 
in vifs(n)] + + def instance_type_fake(*args, **kwargs): + return flavor + + stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake) + stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake) + stubs.Set(db, 'instance_type_get', instance_type_fake) + + return network.get_instance_nw_info(None, 0, 0, None) diff --git a/nova/tests/fake_network_info.py b/nova/tests/fake_network_info.py deleted file mode 100644 index 2e8cf60ec..000000000 --- a/nova/tests/fake_network_info.py +++ /dev/null @@ -1,144 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Rackspace -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova import db -from nova import flags -from nova import test -from nova.network import manager as network_manager - - -HOST = "testhost" -FLAGS = flags.FLAGS - - -class FakeModel(dict): - """Represent a model from the db""" - def __init__(self, *args, **kwargs): - self.update(kwargs) - - def __getattr__(self, name): - return self[name] - - -def fake_network(n, ipv6=None): - if ipv6 == None: - ipv6 = FLAGS.use_ipv6 - rval = {'id': n, - 'label': 'test%d' % n, - 'injected': False, - 'multi_host': False, - 'cidr': '192.168.%d.0/24' % n, - 'cidr_v6': None, - 'netmask': '255.255.255.0', - 'netmask_v6': None, - 'bridge': 'fake_br%d' % n, - 'bridge_interface': 'fake_eth%d' % n, - 'gateway': '192.168.%d.1' % n, - 'gateway_v6': None, - 'broadcast': '192.168.%d.255' % n, - 'dns1': '192.168.%d.3' % n, - 'dns2': '192.168.%d.4' % n, - 'vlan': None, - 'host': None, - 'project_id': 'fake_project', - 'vpn_public_address': '192.168.%d.2' % n} - if ipv6: - rval['cidr_v6'] = '2001:db8:0:%x::/64' % n - rval['gateway_v6'] = '2001:db8:0:%x::1' % n - rval['netmask_v6'] = '64' - - return rval - - -def fixed_ips(num_networks, num_ips, num_floating_ips=0): - for network in xrange(num_networks): - for ip in xrange(num_ips): - id = network * num_ips + ip - f_ips = [floating_ips(id).next() for i in xrange(num_floating_ips)] - yield {'id': id, - 'network_id': network, - 'address': '192.168.%d.1%02d' % (network, ip), - 'instance_id': 0, - 'allocated': False, - # and since network_id and vif_id happen to be equivalent - 'virtual_interface_id': network, - 'floating_ips': [FakeModel(**ip) for ip in f_ips]} - - -flavor = {'id': 0, - 'name': 'fake_flavor', - 'memory_mb': 2048, - 'vcpus': 2, - 'local_gb': 10, - 'flavor_id': 0, - 'swap': 0, - 'rxtx_quota': 0, - 'rxtx_cap': 3} - - -def floating_ips(fixed_ip_id): - for i in xrange(154): - yield {'id': 0, - 'address': '10.10.10.%d' % (i + 100), - 'fixed_ip_id': fixed_ip_id, - 'project_id': None, - 'auto_assigned': False} - - -def vifs(n): - for x in xrange(n): - yield {'id': x, - 'address': 'DE:AD:BE:EF:00:%02x' % x, - 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x, - 'network_id': x, - 'network': FakeModel(**fake_network(x)), - 'instance_id': 0} - - -def ipv4_like(ip, s): - ip = ip.split('.') - s = s.split('.') - - for i, octet in enumerate(s): - if octet == '*': - continue - if octet != ip[i]: - 
return False - return True - - -def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): - # stubs is the self.stubs from the test - # ips_per_vif is the number of ips each vif will have - # num_floating_ips is number of float ips for each fixed ip - network = network_manager.FlatManager(host=HOST) - network.db = db - - def fixed_ips_fake(*args, **kwargs): - return list(fixed_ips(n, ips_per_vif)) - - def virtual_interfaces_fake(*args, **kwargs): - return [vif for vif in vifs(n)] - - def instance_type_fake(*args, **kwargs): - return flavor - - stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake) - stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake) - stubs.Set(db, 'instance_type_get', instance_type_fake) - - return network.get_instance_nw_info(None, 0, 0, None) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index fe5470a6f..190e197f5 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -36,13 +36,13 @@ from nova.api.ec2 import cloud from nova.compute import power_state from nova.virt.libvirt import connection from nova.virt.libvirt import firewall -from nova.tests import fake_network_info +from nova.tests import fake_network libvirt = None FLAGS = flags.FLAGS -_fake_network_info = fake_network_info.fake_get_instance_nw_info -_ipv4_like = fake_network_info.ipv4_like +_fake_network_info = fake_network.fake_get_instance_nw_info +_ipv4_like = fake_network.ipv4_like def _concurrency(wait, done, target): diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 803868cd9..a0079e120 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -22,7 +22,7 @@ from nova import exception from nova import log as logging from nova import test from nova.network import manager as network_manager -from nova.tests import fake_network_info +from nova.tests import fake_network LOG = logging.getLogger('nova.tests.network') @@ -132,7 +132,7 @@ class FlatNetworkTestCase(test.TestCase): is_admin=False) def test_get_instance_nw_info(self): - fake_get_instance_nw_info = fake_network_info.fake_get_instance_nw_info + fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info nw_info = fake_get_instance_nw_info(self.stubs, 0, 2) self.assertFalse(nw_info) -- cgit From 43a392814150e49769e935f4972c9901612570af Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 1 Sep 2011 14:03:22 -0700 Subject: added unit tests and cleanup of import statements --- .../api/openstack/contrib/test_createserverext.py | 113 +++++++++++++++++---- 1 file changed, 95 insertions(+), 18 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..739312399 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -15,7 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-import base64 +import datetime import json import unittest from xml.dom import minidom @@ -26,15 +26,7 @@ import webob from nova import exception from nova import flags from nova import test -from nova import utils import nova.api.openstack -from nova.api.openstack import servers -from nova.api.openstack.contrib import createserverext -import nova.compute.api - -import nova.scheduler.api -import nova.image.fake -import nova.rpc from nova.tests.api.openstack import fakes @@ -51,22 +43,41 @@ DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'), INVALID_NETWORKS = [('invalid', 'invalid-ip-address')] +INSTANCE = { + "id": 1, + "display_name": "test_server", + "uuid": FAKE_UUID, + "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), + "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), + "security_groups": [{"id": 1, "name": "test"}] + } + +def return_server_by_id(context, id, session=None): + INSTANCE['id'] = id + return INSTANCE + + +def return_security_group_non_existing(context, project_id, group_name): + raise exception.SecurityGroupNotFoundForProject(project_id=project_id, + security_group_id=group_name) + +def return_security_group_get_by_name(context, project_id, group_name): + return {'id': 1, 'name': group_name} + + +def return_security_group_get(context, security_group_id, session): + return {'id': security_group_id} + + +def return_instance_add_security_group(context, instance_id, security_group_id): + pass class CreateserverextTest(test.TestCase): def setUp(self): super(CreateserverextTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) - fakes.stub_out_image_service(self.stubs) - fakes.stub_out_key_pair_funcs(self.stubs) - self.allow_admin = FLAGS.allow_admin_api def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = self.allow_admin super(CreateserverextTest, self).tearDown() def _setup_mock_compute_api(self): @@ -87,6 +98,7 @@ class CreateserverextTest(test.TestCase): self.networks = kwargs['requested_networks'] else: self.networks = None + return [{'id': '1234', 'display_name': 'fakeinstance', 'uuid': FAKE_UUID, 'created_at': "", @@ -107,6 +119,18 @@ class CreateserverextTest(test.TestCase): '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) return compute_api + def _create_security_group_request_dict(self, security_groups): + server = {} + server['name'] = 'new-server-test' + server['imageRef'] = 1 + server['flavorRef'] = 1 + if security_groups is not None: + sg_list = [] + for name in security_groups: + sg_list.append({'name': name}) + server['security_groups'] = sg_list + return {'server': server} + def _create_networks_request_dict(self, networks): server = {} server['name'] = 'new-server-test' @@ -304,3 +328,56 @@ class CreateserverextTest(test.TestCase): self.assertEquals(response.status_int, 202) self.assertEquals(compute_api.networks, [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]) + + def test_create_instance_with_security_group_empty_list_json(self): + security_groups = [] + body_dict = self._create_security_group_request_dict(security_groups) + request = self._get_create_request_json(body_dict) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 202) + res_dict = json.loads(response.body) + self.assertEquals([{'name': "default"}], res_dict['server']['security_groups']) + + def test_create_instance_with_security_group_non_existing_json(self): + 
security_groups = ['non-existing'] + self.stubs.Set(nova.db, 'security_group_get_by_name', + return_security_group_non_existing) + body_dict = self._create_security_group_request_dict(security_groups) + request = self._get_create_request_json(body_dict) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_create_instance_with_security_group_json(self): + security_groups = ['test', 'test1'] + self.stubs.Set(nova.db, 'security_group_get_by_name', + return_security_group_get_by_name) + self.stubs.Set(nova.db.api, 'instance_add_security_group', + return_instance_add_security_group) + body_dict = self._create_security_group_request_dict(security_groups) + request = self._get_create_request_json(body_dict) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 202) + + def test_get_server_by_id_verify_security_groups_json(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) + req = webob.Request.blank('/v1.1/123/os-create-server-ext/1') + req.headers['Content-Type'] = 'application/json' + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 200) + res_dict = json.loads(response.body) + expected_security_group = [{"name": "test"}] + self.assertEquals(res_dict['server']['security_groups'], + expected_security_group) + + def test_get_server_by_id_verify_security_groups_xml(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) + req = webob.Request.blank('/v1.1/123/os-create-server-ext/1') + req.headers['Accept'] = 'application/xml' + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 200) + dom = minidom.parseString(response.body) + server = dom.childNodes[0] + security_groups = server.getElementsByTagName('security_groups')[0] + security_group = security_groups.getElementsByTagName('security_group')[0] + self.assertEqual(INSTANCE['security_groups'][0]['name'], security_group.getAttribute("name")) + -- cgit From 2d2d9a5f5caed27d9ade06b2dbc56b793b7e5d3b Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 1 Sep 2011 14:32:48 -0700 Subject: Deleted debug messages --- nova/tests/api/openstack/contrib/test_createserverext.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index d6600d054..ba8fb925e 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -32,8 +32,6 @@ import nova.api.openstack from nova.tests.api.openstack import fakes -from nova import log as logging -LOG = logging.getLogger("api.nova.openstack.etere") FLAGS = flags.FLAGS FLAGS.verbose = True @@ -388,7 +386,6 @@ class CreateserverextTest(test.TestCase): body_dict = self._create_security_group_request_dict(security_groups) request = self._get_create_request_json(body_dict) response = request.get_response(fakes.wsgi_app()) - LOG.debug(response) self.assertEquals(response.status_int, 202) def test_get_server_by_id_verify_security_groups_json(self): -- cgit From f0a6c35149a1b9cc278cd3ba960861da9189b5bf Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 1 Sep 2011 14:39:34 -0700 Subject: remove references to MelangeIPAMTest, as they cannot be used yet --- nova/tests/test_quantum.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 0fa4184b1..42acf03e7 100644 --- 
a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -249,13 +249,3 @@ class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase): with session.begin(): for fip_ref in result: session.delete(fip_ref) - -# FIXME(danwent): Cannot run this unit tests automatically for now, as -# it requires melange to be running locally. -# -#class QuantumMelangeIPAMTestCase(QuantumTestCaseBase, test.TestCase): -# -# def setUp(self): -# super(QuantumMelangeIPAMTestCase, self).setUp() -# self.net_man = quantum_manager.QuantumManager( \ -# ipam_lib="nova.network.quantum.melange_ipam_lib") -- cgit From 527670d632788d20aca7a3f12495d4c97e036d51 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 1 Sep 2011 14:40:30 -0700 Subject: melange testing cleanup, localization cleanup --- nova/network/quantum/client.py | 5 +++-- nova/network/quantum/fake.py | 4 ++-- nova/network/quantum/manager.py | 19 +++++++++---------- nova/network/quantum/melange_connection.py | 19 +++++++++++-------- nova/network/quantum/melange_ipam_lib.py | 7 ++++--- 5 files changed, 29 insertions(+), 25 deletions(-) diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index 455bb8a79..f4936695e 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -172,8 +172,9 @@ class Client(object): c = connection_type(self.host, self.port) if self.logger: - self.logger.debug(_("Quantum Client Request:\n%s %s\n" % - (method, action))) + self.logger.debug( + _("Quantum Client Request:\n%(method)s %(action)s\n" % + locals())) if body: self.logger.debug(body) diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py index 6a4005c59..4ecddd2ae 100644 --- a/nova/network/quantum/fake.py +++ b/nova/network/quantum/fake.py @@ -78,8 +78,8 @@ class FakeQuantumClientConnection(object): def detach_and_delete_port(self, tenant_id, net_id, port_id): if not self.network_exists(tenant_id, net_id): raise exception.NotFound( - _("network %s does not exist for tenant %s" % - (net_id, tenant_id))) + _("network %(net_id)s does not exist " + "for tenant %(tenant_id)s" % locals())) del self.nets[net_id]['ports'][port_id] def get_port_by_attachment(self, tenant_id, attachment_id): diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index 153f6c0f2..fa16475ac 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -91,8 +91,8 @@ class QuantumManager(manager.FlatManager): if quantum_net_id: if not self.q_conn.network_exists(q_tenant_id, quantum_net_id): raise Exception(_("Unable to find existing quantum " \ - " network for tenant '%s' with net-id '%s'" % \ - (q_tenant_id, quantum_net_id))) + " network for tenant '%(q_tenant_id)s' with " + "net-id '%(quantum_net_id)s'" % locals())) else: # otherwise, create network from default quantum pool quantum_net_id = self.q_conn.create_network(q_tenant_id, label) @@ -252,17 +252,18 @@ class QuantumManager(manager.FlatManager): 'dns': [], 'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]} - if v6_subnet['cidr']: - network_dict['cidr_v6'] = v6_subnet['cidr'] - info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips] + if v6_subnet: + if v6_subnet['cidr']: + network_dict['cidr_v6'] = v6_subnet['cidr'] + info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips] - if v6_subnet['gateway']: - info['gateway6'] = v6_subnet['gateway'] + if v6_subnet['gateway']: + info['gateway6'] = v6_subnet['gateway'] dns_dict = {} for s in [v4_subnet, v6_subnet]: for k in ['dns1', 'dns2']: - if s[k]: + if s and s[k]: 
dns_dict[s[k]] = None info['dns'] = [d for d in dns_dict.keys()] @@ -308,8 +309,6 @@ class QuantumManager(manager.FlatManager): except exception.InstanceNotFound: LOG.error(_("Attempted to deallocate non-existent instance: %s" % (instance_id))) - self._do_trigger_security_group_members_refresh_for_instance( - instance_id) def validate_networks(self, context, networks): """ Validates that this tenant has quantum networks with the associated diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py index 4dc35d15d..5a79eff77 100644 --- a/nova/network/quantum/melange_connection.py +++ b/nova/network/quantum/melange_connection.py @@ -48,6 +48,7 @@ class MelangeConnection(object): self.host = host self.port = port self.use_ssl = use_ssl + self.version = "v0.1" def get(self, path, params={}, headers={}): return self.do_request("GET", path, params=params, headers=headers) @@ -66,7 +67,9 @@ class MelangeConnection(object): def do_request(self, method, path, body=None, headers={}, params={}): - url = path + '.json?' + urllib.urlencode(params) + url = "/%s/%s.json?%s" % (self.version, + path, + urllib.urlencode(params)) try: connection = self._get_connection() @@ -75,7 +78,7 @@ class MelangeConnection(object): response_str = response.read() if response.status < 400: return response_str - raise Exception(_("Server returned error: %s", response_str)) + raise Exception(_("Server returned error: %s" % response_str)) except (socket.error, IOError), e: raise Exception(_("Unable to connect to " "server. Got error: %s" % e)) @@ -86,7 +89,7 @@ class MelangeConnection(object): request_body = (json.dumps(dict(network=dict(mac_address=mac_address, tenant_id=project_id))) if mac_address else None) - url = ("/ipam%(tenant_scope)s/networks/%(network_id)s/" + url = ("ipam%(tenant_scope)s/networks/%(network_id)s/" "interfaces/%(vif_id)s/ip_allocations" % locals()) response = self.post(url, body=request_body, headers=json_content_type) @@ -96,7 +99,7 @@ class MelangeConnection(object): project_id=None, dns1=None, dns2=None): tenant_scope = "/tenants/%s" % project_id if project_id else "" - url = "/ipam%(tenant_scope)s/ip_blocks" % locals() + url = "ipam%(tenant_scope)s/ip_blocks" % locals() req_params = dict(ip_block=dict(cidr=cidr, network_id=network_id, type='private', dns1=dns1, dns2=dns2)) @@ -106,14 +109,14 @@ class MelangeConnection(object): def delete_block(self, block_id, project_id=None): tenant_scope = "/tenants/%s" % project_id if project_id else "" - url = "/ipam%(tenant_scope)s/ip_blocks/%(block_id)s" % locals() + url = "ipam%(tenant_scope)s/ip_blocks/%(block_id)s" % locals() self.delete(url, headers=json_content_type) def get_blocks(self, project_id=None): tenant_scope = "/tenants/%s" % project_id if project_id else "" - url = "/ipam%(tenant_scope)s/ip_blocks" % locals() + url = "ipam%(tenant_scope)s/ip_blocks" % locals() response = self.get(url, headers=json_content_type) return json.loads(response) @@ -121,7 +124,7 @@ class MelangeConnection(object): def get_allocated_ips(self, network_id, vif_id, project_id=None): tenant_scope = "/tenants/%s" % project_id if project_id else "" - url = ("/ipam%(tenant_scope)s/networks/%(network_id)s/" + url = ("ipam%(tenant_scope)s/networks/%(network_id)s/" "interfaces/%(vif_id)s/ip_allocations" % locals()) response = self.get(url, headers=json_content_type) @@ -130,7 +133,7 @@ class MelangeConnection(object): def deallocate_ips(self, network_id, vif_id, project_id=None): tenant_scope = "/tenants/%s" % project_id if 
project_id else "" - url = ("/ipam%(tenant_scope)s/networks/%(network_id)s/" + url = ("ipam%(tenant_scope)s/networks/%(network_id)s/" "interfaces/%(vif_id)s/ip_allocations" % locals()) self.delete(url, headers=json_content_type) diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index 71c0d7ce6..dcee0a3f9 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -17,6 +17,7 @@ from netaddr import IPNetwork +from nova import db from nova import exception from nova import flags from nova import log as logging @@ -68,7 +69,8 @@ class QuantumMelangeIPAMLib(object): "project_id": project_id, "priority": priority, "label": label} - network = self.db.network_create_safe(context, net) + admin_context = context.elevated() + network = db.network_create_safe(admin_context, net) def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref): """ Pass call to allocate fixed IP on to Melange""" @@ -100,8 +102,7 @@ class QuantumMelangeIPAMLib(object): self.m_conn.delete_block(b['id'], tenant_id) network = db.network_get_by_uuid(admin_context, net_id) - if network is not None: - db.network_delete_safe(context, network['id']) + db.network_delete_safe(context, network['id']) def get_project_and_global_net_ids(self, context, project_id): """ Fetches all networks associated with this project, or -- cgit From 38373bf8f60dd068dec69933d1456a8deb75bf8e Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Thu, 1 Sep 2011 15:02:09 -0700 Subject: move content of quantum/fake.py to test_quantum.py in unit testing class (most original content has been removed anyway) --- nova/network/quantum/fake.py | 92 ----------------------------------------- nova/network/quantum/manager.py | 9 ++-- nova/tests/test_quantum.py | 76 +++++++++++++++++++++++++++++++++- 3 files changed, 78 insertions(+), 99 deletions(-) delete mode 100644 nova/network/quantum/fake.py diff --git a/nova/network/quantum/fake.py b/nova/network/quantum/fake.py deleted file mode 100644 index 4ecddd2ae..000000000 --- a/nova/network/quantum/fake.py +++ /dev/null @@ -1,92 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Nicira Networks, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import exception -from nova import ipv6 -from nova import log as logging -from nova import utils - - -LOG = logging.getLogger("network.quantum.fake") - - -# this class can be used for unit functional/testing on nova, -# as it does not actually make remote calls to the Quantum service -class FakeQuantumClientConnection(object): - - def __init__(self): - self.nets = {} - - def get_networks_for_tenant(self, tenant_id): - net_ids = [] - for net_id, n in self.nets.items(): - if n['tenant-id'] == tenant_id: - net_ids.append(net_id) - return net_ids - - def create_network(self, tenant_id, network_name): - - uuid = str(utils.gen_uuid()) - self.nets[uuid] = {'net-name': network_name, - 'tenant-id': tenant_id, - 'ports': {}} - return uuid - - def delete_network(self, tenant_id, net_id): - if self.nets[net_id]['tenant-id'] == tenant_id: - del self.nets[net_id] - - def network_exists(self, tenant_id, net_id): - try: - return self.nets[net_id]['tenant-id'] == tenant_id - except KeyError: - return False - - def _confirm_not_attached(self, interface_id): - for n in self.nets.values(): - for p in n['ports'].values(): - if p['attachment-id'] == interface_id: - raise Exception(_("interface '%s' is already attached" % - interface_id)) - - def create_and_attach_port(self, tenant_id, net_id, interface_id): - if not self.network_exists(tenant_id, net_id): - raise Exception( - _("network %(net_id)s does not exist for tenant %(tenant_id)" - % locals())) - - self._confirm_not_attached(interface_id) - uuid = str(utils.gen_uuid()) - self.nets[net_id]['ports'][uuid] = \ - {"port-state": "ACTIVE", - "attachment-id": interface_id} - - def detach_and_delete_port(self, tenant_id, net_id, port_id): - if not self.network_exists(tenant_id, net_id): - raise exception.NotFound( - _("network %(net_id)s does not exist " - "for tenant %(tenant_id)s" % locals())) - del self.nets[net_id]['ports'][port_id] - - def get_port_by_attachment(self, tenant_id, attachment_id): - for net_id, n in self.nets.items(): - if n['tenant-id'] == tenant_id: - for port_id, p in n['ports'].items(): - if p['attachment-id'] == attachment_id: - return (net_id, port_id) - - return (None, None) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index fa16475ac..db2b000de 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -50,17 +50,16 @@ class QuantumManager(manager.FlatManager): Support for these capabilities are targted for future releases. """ - def __init__(self, ipam_lib=None, *args, **kwargs): + def __init__(self, q_conn=None, ipam_lib=None, *args, **kwargs): """ Initialize two key libraries, the connection to a Quantum service, and the library for implementing IPAM. Calls inherited FlatManager constructor. 
""" - if FLAGS.fake_network: - self.q_conn = fake.FakeQuantumClientConnection() - else: - self.q_conn = quantum_connection.QuantumClientConnection() + if not q_conn: + q_conn = quantum_connection.QuantumClientConnection() + self.q_conn = q_conn if not ipam_lib: ipam_lib = FLAGS.quantum_ipam_lib diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 42acf03e7..0b1a1f204 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -20,12 +20,82 @@ from nova import db from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from nova import exception +from nova import ipv6 from nova import log as logging from nova.network.quantum import manager as quantum_manager from nova import test +from nova import utils LOG = logging.getLogger('nova.tests.quantum_network') + +# this class can be used for unit functional/testing on nova, +# as it does not actually make remote calls to the Quantum service +class FakeQuantumClientConnection(object): + + def __init__(self): + self.nets = {} + + def get_networks_for_tenant(self, tenant_id): + net_ids = [] + for net_id, n in self.nets.items(): + if n['tenant-id'] == tenant_id: + net_ids.append(net_id) + return net_ids + + def create_network(self, tenant_id, network_name): + + uuid = str(utils.gen_uuid()) + self.nets[uuid] = {'net-name': network_name, + 'tenant-id': tenant_id, + 'ports': {}} + return uuid + + def delete_network(self, tenant_id, net_id): + if self.nets[net_id]['tenant-id'] == tenant_id: + del self.nets[net_id] + + def network_exists(self, tenant_id, net_id): + try: + return self.nets[net_id]['tenant-id'] == tenant_id + except KeyError: + return False + + def _confirm_not_attached(self, interface_id): + for n in self.nets.values(): + for p in n['ports'].values(): + if p['attachment-id'] == interface_id: + raise Exception(_("interface '%s' is already attached" % + interface_id)) + + def create_and_attach_port(self, tenant_id, net_id, interface_id): + if not self.network_exists(tenant_id, net_id): + raise Exception( + _("network %(net_id)s does not exist for tenant %(tenant_id)" + % locals())) + + self._confirm_not_attached(interface_id) + uuid = str(utils.gen_uuid()) + self.nets[net_id]['ports'][uuid] = \ + {"port-state": "ACTIVE", + "attachment-id": interface_id} + + def detach_and_delete_port(self, tenant_id, net_id, port_id): + if not self.network_exists(tenant_id, net_id): + raise exception.NotFound( + _("network %(net_id)s does not exist " + "for tenant %(tenant_id)s" % locals())) + del self.nets[net_id]['ports'][port_id] + + def get_port_by_attachment(self, tenant_id, attachment_id): + for net_id, n in self.nets.items(): + if n['tenant-id'] == tenant_id: + for port_id, p in n['ports'].items(): + if p['attachment-id'] == attachment_id: + return (net_id, port_id) + + return (None, None) + networks = [{'label': 'project1-net1', 'injected': False, 'multi_host': False, @@ -230,8 +300,10 @@ class QuantumNovaIPAMTestCase(QuantumTestCaseBase, test.TestCase): def setUp(self): super(QuantumNovaIPAMTestCase, self).setUp() - self.net_man = quantum_manager.QuantumManager( \ - ipam_lib="nova.network.quantum.nova_ipam_lib") + + self.net_man = quantum_manager.QuantumManager( + ipam_lib="nova.network.quantum.nova_ipam_lib", + q_conn=FakeQuantumClientConnection()) # Tests seem to create some networks by default, which # we don't want. So we delete them. -- cgit From e6e3f46bf449fa371a584720c12c21e0832f4160 Mon Sep 17 00:00:00 2001 From: "Kevin L. 
Mitchell" Date: Thu, 1 Sep 2011 17:15:21 -0500 Subject: import filters in scheduler/host_filter.py so default_host_filter gets added to FLAGS; rework SchedulerManager() to only catch missing 'schedule_' attribute and report other missing attributes --- nova/scheduler/host_filter.py | 1 + nova/scheduler/manager.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 826a99b0a..4024ec854 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -31,6 +31,7 @@ import types from nova import exception from nova import flags import nova.scheduler +from nova.scheduler import filters FLAGS = flags.FLAGS diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 0e395ee79..92bb1ed6e 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -93,12 +93,14 @@ class SchedulerManager(manager.Manager): driver_method = 'schedule_%s' % method elevated = context.elevated() try: - host = getattr(self.driver, driver_method)(elevated, *args, - **kwargs) + real_meth = getattr(self.driver, driver_method) + args = (elevated,) + args except AttributeError, e: LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s." - "Reverting to schedule()") % locals()) - host = self.driver.schedule(elevated, topic, *args, **kwargs) + "Reverting to schedule()") % locals()) + real_meth = self.driver.schedule + args = (elevated, topic) + args + real_meth(*args, **kwargs) if not host: LOG.debug(_("%(topic)s %(method)s handled in Scheduler") -- cgit From 81f3c9e0d1657783edf8f2ab7e055c5628af33ec Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Fri, 2 Sep 2011 04:53:10 +0000 Subject: Launchpad automatic translations update. --- po/pt_BR.po | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/po/pt_BR.po b/po/pt_BR.po index e10b0c0de..c4eb487d6 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -8,14 +8,14 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-09-01 04:15+0000\n" -"Last-Translator: Daniel Negri \n" +"PO-Revision-Date: 2011-09-01 20:56+0000\n" +"Last-Translator: Robson Negreiros Bezerra \n" "Language-Team: Brazilian Portuguese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-09-01 05:02+0000\n" -"X-Generator: Launchpad (build 13827)\n" +"X-Launchpad-Export-Date: 2011-09-02 04:53+0000\n" +"X-Generator: Launchpad (build 13830)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 @@ -453,11 +453,13 @@ msgid "" "Detach volume %(volume_id)s from mountpoint %(mp)s on instance " "%(instance_id)s" msgstr "" +"Desconectando volume %(volume_id)s do ponto de montagem %(mp)s na instância " +"%(instance_id)s" #: ../nova/compute/manager.py:588 #, python-format msgid "Detaching volume from unknown instance %s" -msgstr "" +msgstr "Desconectando volume da instância desconhecida %s" #: ../nova/scheduler/simple.py:53 #, python-format -- cgit From 0c737cb60980b8db74496e7914322f567950c2c3 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 2 Sep 2011 01:07:30 -0400 Subject: Removing xml functions that are no longer called. 
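The diff below drops the old minidom helpers and settles on the lxml.etree element-building pattern already used by the rest of the VersionsAtomSerializer. As a rough, standalone sketch of that pattern only (this is not Nova code; ATOM_NS and build_feed are made-up names, and lxml must be installed):

from lxml import etree

ATOM_NS = 'http://www.w3.org/2005/Atom'

def build_feed(title, feed_id, updated):
    # The root element carries the Atom namespace via nsmap, and
    # children are hung off it with etree.SubElement, the same style
    # the serializer below uses in _create_feed().
    feed = etree.Element('feed', nsmap={None: ATOM_NS})
    title_elem = etree.SubElement(feed, 'title')
    title_elem.set('type', 'text')
    title_elem.text = title
    etree.SubElement(feed, 'updated').text = updated
    etree.SubElement(feed, 'id').text = feed_id
    link = etree.SubElement(feed, 'link')
    link.set('rel', 'self')
    link.set('href', feed_id)
    return etree.tostring(feed, encoding='UTF-8')

# Illustrative values only:
# build_feed('Available API Versions', 'http://localhost/', '2011-01-21T11:33:21Z')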
--- nova/api/openstack/versions.py | 105 +++++------------------------- nova/tests/api/openstack/test_versions.py | 19 ++++-- 2 files changed, 27 insertions(+), 97 deletions(-) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index 0766e1eb7..31dd9dc11 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -19,7 +19,6 @@ from datetime import datetime from lxml import etree import webob import webob.dec -from xml.dom import minidom import nova.api.openstack.views.versions from nova.api.openstack import wsgi @@ -188,32 +187,25 @@ class VersionsXMLSerializer(wsgi.XMLDictSerializer): for version in data['versions']: version_elem = etree.SubElement(root, 'version') self._populate_version(version_elem, version) - return etree.tostring(root, encoding='UTF-8') + return self._to_xml(root) def show(self, data): root = etree.Element('version', nsmap=self.NSMAP) self._populate_version(root, data['version']) - return etree.tostring(root, encoding='UTF-8') + return self._to_xml(root) def multi(self, data): root = etree.Element('choices', nsmap=self.NSMAP) for version in data['choices']: version_elem = etree.SubElement(root, 'version') self._populate_version(version_elem, version) - return etree.tostring(root, encoding='UTF-8') + return self._to_xml(root) class VersionsAtomSerializer(wsgi.XMLDictSerializer): NSMAP = {None: xmlutil.XMLNS_ATOM} - #TODO(wwolf): this is temporary until we get rid of toprettyxml - # in the base class (XMLDictSerializer), which I plan to do in - # another branch - def to_xml_string(self, node, has_atom=False): - self._add_xmlns(node, has_atom) - return node.toxml(encoding='UTF-8') - def __init__(self, metadata=None, xmlns=None): self.metadata = metadata or {} if not xmlns: @@ -221,14 +213,6 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): else: self.xmlns = xmlns - def _create_text_elem(self, name, text, type=None): - elem = self._xml_doc.createElement(name) - if type: - elem.setAttribute('type', type) - elem_text = self._xml_doc.createTextNode(text) - elem.appendChild(elem_text) - return elem - def _get_most_recent_update(self, versions): recent = None for version in versions: @@ -246,47 +230,21 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): link_href = link_href.rstrip('/') return link_href.rsplit('/', 1)[0] + '/' - def _create_detail_meta(self, root, version): - title = self._create_text_elem('title', "About This Version", - type='text') - - updated = self._create_text_elem('updated', version['updated']) - - uri = version['links'][0]['href'] - id = self._create_text_elem('id', uri) - - link = self._xml_doc.createElement('link') - link.setAttribute('rel', 'self') - link.setAttribute('href', uri) - - author = self._xml_doc.createElement('author') - author_name = self._create_text_elem('name', 'Rackspace') - author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/') - author.appendChild(author_name) - author.appendChild(author_uri) - - root.appendChild(title) - root.appendChild(updated) - root.appendChild(id) - root.appendChild(author) - root.appendChild(link) - - def _create_feed(self, versions): + def _create_feed(self, versions, feed_title, feed_id): feed = etree.Element('feed', nsmap=self.NSMAP) title = etree.SubElement(feed, 'title') title.set('type', 'text') - title.text = 'Available API Versions' + title.text = feed_title # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) etree.SubElement(feed, 'updated').text = recent - base_url = 
self._get_base_url(versions[0]['links'][0]['href']) - etree.SubElement(feed, 'id').text = base_url + etree.SubElement(feed, 'id').text = feed_id link = etree.SubElement(feed, 'link') link.set('rel', 'self') - link.set('href', base_url) + link.set('href', feed_id) author = etree.SubElement(feed, 'author') etree.SubElement(author, 'name').text = 'Rackspace' @@ -319,50 +277,17 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): version['updated']) return entry - def _create_version_entries(self, root, versions): - for version in versions: - entry = self._xml_doc.createElement('entry') - - id = self._create_text_elem('id', version['links'][0]['href']) - title = self._create_text_elem('title', - 'Version %s' % version['id'], - type='text') - updated = self._create_text_elem('updated', version['updated']) - - entry.appendChild(id) - entry.appendChild(title) - entry.appendChild(updated) - - for link in version['links']: - link_node = self._xml_doc.createElement('link') - link_node.setAttribute('rel', link['rel']) - link_node.setAttribute('href', link['href']) - if 'type' in link: - link_node.setAttribute('type', link['type']) - - entry.appendChild(link_node) - - content = self._create_text_elem('content', - 'Version %s %s (%s)' % - (version['id'], - version['status'], - version['updated']), - type='text') - - entry.appendChild(content) - root.appendChild(entry) - def index(self, data): - feed = self._create_feed(data['versions']) - return etree.tostring(feed, encoding='UTF-8') + versions = data['versions'] + feed_id = self._get_base_url(versions[0]['links'][0]['href']) + feed = self._create_feed(versions, 'Available API Versions', feed_id) + return self._to_xml(feed) def show(self, data): - self._xml_doc = minidom.Document() - node = self._xml_doc.createElementNS(self.xmlns, 'feed') - self._create_detail_meta(node, data['version']) - self._create_version_entries(node, [data['version']]) - - return self.to_xml_string(node) + version = data['version'] + feed_id = version['links'][0]['href'] + feed = self._create_feed([version], 'About This Version', feed_id) + return self._to_xml(feed) class VersionsHeadersSerializer(wsgi.ResponseHeadersSerializer): diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 03a47bf24..a9308e31d 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -306,7 +306,8 @@ class VersionsTest(test.TestCase): self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') self.assertEqual(f.feed.id, 'http://localhost/v1.0/') self.assertEqual(f.feed.author, 'Rackspace') - self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.0/', 'type': 'application/atom+xml', 'rel': 'self'}]) @@ -350,7 +351,8 @@ class VersionsTest(test.TestCase): self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') self.assertEqual(f.feed.id, 'http://localhost/v1.1/') self.assertEqual(f.feed.author, 'Rackspace') - self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.1/', 'type': 'application/atom+xml', 'rel': 'self'}]) @@ -394,7 +396,8 @@ class VersionsTest(test.TestCase): self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') self.assertEqual(f.feed.id, 'http://localhost/') 
self.assertEqual(f.feed.author, 'Rackspace') - self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') self.assertEqual(f.feed.links, [{'href': 'http://localhost/', 'type': 'application/atom+xml', 'rel': 'self'}]) @@ -489,7 +492,7 @@ self.assertEqual(len(versions), 2) version = versions[0] - self.assertEqual(version.get('id'), 'v1.1'); + self.assertEqual(version.get('id'), 'v1.1') self.assertEqual(version.get('status'), 'CURRENT') media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) @@ -500,7 +503,7 @@ [{'rel': 'self', 'href': 'http://localhost/v1.1/images/1'}])) version = versions[1] - self.assertEqual(version.get('id'), 'v1.0'); + self.assertEqual(version.get('id'), 'v1.0') self.assertEqual(version.get('status'), 'DEPRECATED') media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) @@ -784,7 +787,8 @@ class VersionsSerializerTests(test.TestCase): self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z') self.assertEqual(f.feed.id, 'http://test/') self.assertEqual(f.feed.author, 'Rackspace') - self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') self.assertEqual(f.feed.links, [{'href': 'http://test/', 'type': 'application/atom+xml', 'rel': 'self'}]) @@ -846,7 +850,8 @@ self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z') self.assertEqual(f.feed.id, 'http://localhost/v1.1/') self.assertEqual(f.feed.author, 'Rackspace') - self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') + self.assertEqual(f.feed.author_detail.href, + 'http://www.rackspace.com/') self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.1/', 'type': 'application/atom+xml', 'rel': 'self'}]) -- cgit From 4f72f6c0fb88baaa680e5dd7973a2b1aa9bd6aaf Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Fri, 2 Sep 2011 01:28:11 -0400 Subject: Adding feedparser to pip-requires --- tools/pip-requires | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pip-requires b/tools/pip-requires index 60b502ffd..4949b66ca 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -34,3 +34,4 @@ coverage nosexcover GitPython paramiko +feedparser -- cgit From 1081b9d52026afb84128c15a1df0998f80810ce9 Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Fri, 2 Sep 2011 15:48:32 +0200 Subject: Correct tests associated.
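Before the corrected test below, a compact illustration of the behaviour it now pins down: for an AMI image that carries a kernel but no ramdisk, the lookup should return the kernel ID together with None rather than raising NotFound. The helper here is a made-up stand-in, not Nova's actual Glance-backed implementation:

def get_kernel_ramdisk(image_meta):
    # Hypothetical stand-in for the helper under test; it only shows
    # the expected contract, not the real lookup logic.
    properties = image_meta.get('properties', {})
    return properties.get('kernel_id'), properties.get('ramdisk_id')

image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami',
              'properties': {'kernel_id': 1}}
kernel_id, ramdisk_id = get_kernel_ramdisk(image_meta)
assert kernel_id == 1
assert ramdisk_id is None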
--- nova/tests/api/openstack/test_servers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3559e6de5..e5ebedf0e 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3490,10 +3490,14 @@ class TestGetKernelRamdiskFromImage(test.TestCase): self.assertRaises(exception.NotFound, self._get_k_r, image_meta) def test_ami_no_ramdisk(self): - """If an ami is missing a ramdisk it should raise NotFound""" + """If an ami is missing a ramdisk, return kernel ID and None for + ramdisk ID + """ image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', 'properties': {'kernel_id': 1}} - self.assertRaises(exception.NotFound, self._get_k_r, image_meta) + kernel_id, ramdisk_id = self._get_k_r(image_meta) + self.assertEqual(kernel_id, 1) + self.assertEqual(ramdisk_id, None) def test_ami_kernel_ramdisk_present(self): """Return IDs if both kernel and ramdisk are present""" -- cgit From c5a90f842c99f94af2e321051d85e81f49c1e692 Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Fri, 2 Sep 2011 16:01:13 +0200 Subject: Update Authors file. --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index b9e7a7d23..2961b90ef 100644 --- a/Authors +++ b/Authors @@ -30,6 +30,7 @@ Devendra Modium Devin Carlen Donal Lafferty Ed Leafe +Edouard Thuleau Eldar Nugaev Eric Day Eric Windisch -- cgit From 78bb09eacedb91f0b5bf294ede9372768409590b Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Fri, 2 Sep 2011 16:17:18 +0200 Subject: Email contact error. --- Authors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Authors b/Authors index 2961b90ef..eff58692e 100644 --- a/Authors +++ b/Authors @@ -30,7 +30,7 @@ Devendra Modium Devin Carlen Donal Lafferty Ed Leafe -Edouard Thuleau +Edouard Thuleau Eldar Nugaev Eric Day Eric Windisch -- cgit From f119805aa7c8e2dd7f0bafe666d976f3a0c08795 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 2 Sep 2011 11:00:03 -0500 Subject: Forgot to handle return value --- nova/scheduler/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 92bb1ed6e..bf18abc6c 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -100,7 +100,7 @@ "Reverting to schedule()") % locals()) real_meth = self.driver.schedule args = (elevated, topic) + args - real_meth(*args, **kwargs) + host = real_meth(*args, **kwargs) if not host: LOG.debug(_("%(topic)s %(method)s handled in Scheduler") -- cgit From 666f7152910838f866ca4b76258b025c27744ffb Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 2 Sep 2011 12:31:10 -0500 Subject: Add documentation comment --- nova/scheduler/host_filter.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 4024ec854..9f7d34ea7 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -31,6 +31,11 @@ import types from nova import exception from nova import flags import nova.scheduler + +# NOTE(Vek): Even though we don't use filters in here anywhere, we +# depend on default_host_filter being available in FLAGS, +# and that happens only when filters/abstract_filter.py is +# imported.
from nova.scheduler import filters -- cgit From c5acb6186318541b1743bf62daa0510c9dba9c48 Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Fri, 2 Sep 2011 20:25:18 +0200 Subject: Email error. --- Authors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Authors b/Authors index eff58692e..1160a0741 100644 --- a/Authors +++ b/Authors @@ -30,7 +30,7 @@ Devendra Modium Devin Carlen Donal Lafferty Ed Leafe -Edouard Thuleau +Édouard Thuleau Eldar Nugaev Eric Day Eric Windisch -- cgit From 55f0a6a058546f0ffbf4bee0e92eea2e70d8c76f Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Fri, 2 Sep 2011 20:29:58 +0200 Subject: Email error again. Tired. --- Authors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Authors b/Authors index 1160a0741..0beab16a4 100644 --- a/Authors +++ b/Authors @@ -30,7 +30,7 @@ Devendra Modium Devin Carlen Donal Lafferty Ed Leafe -Édouard Thuleau +Édouard Thuleau Eldar Nugaev Eric Day Eric Windisch -- cgit From 5fe5c5dc26276a10b7dc766104a7e2d6c7793dc3 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 2 Sep 2011 11:51:55 -0700 Subject: remove import of 'fake' from nova manager, now that we've moved that to test_quantum.py --- nova/network/quantum/manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index db2b000de..c10dc90de 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -22,7 +22,6 @@ from nova import log as logging from nova import manager from nova.network import manager from nova.network.quantum import quantum_connection -from nova.network.quantum import fake from nova import utils LOG = logging.getLogger("quantum_manager") -- cgit From bd1bc5e3c6f52963ce088e2e0a6da41f125d29f1 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 2 Sep 2011 12:11:28 -0700 Subject: more review cleanup --- .../migrate_repo/versions/044_add_network_priority.py | 4 +--- nova/network/quantum/melange_connection.py | 17 +++++++++-------- nova/tests/test_quantum.py | 4 ++-- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py index e69380199..e3ee6a85f 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py @@ -25,13 +25,11 @@ meta = MetaData() # Add priority column to networks table priority = Column('priority', Integer()) +networks = Table('networks', meta, autoload=True) def upgrade(migrate_engine): meta.bind = migrate_engine - # grab tables and (column for dropping later) - networks = Table('networks', meta, autoload=True) - try: networks.create_column(priority) except Exception: diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py index 5a79eff77..d5a1901aa 100644 --- a/nova/network/quantum/melange_connection.py +++ b/nova/network/quantum/melange_connection.py @@ -50,13 +50,13 @@ class MelangeConnection(object): self.use_ssl = use_ssl self.version = "v0.1" - def get(self, path, params={}, headers={}): + def get(self, path, params=None, headers=None): return self.do_request("GET", path, params=params, headers=headers) - def post(self, path, body=None, headers={}): + def post(self, path, body=None, headers=None): return self.do_request("POST", path, body=body, headers=headers) - def delete(self, path, headers={}): + def delete(self, path,
headers=None): return self.do_request("DELETE", path, headers=headers) def _get_connection(self): @@ -65,12 +65,13 @@ class MelangeConnection(object): else: return httplib.HTTPConnection(self.host, self.port) - def do_request(self, method, path, body=None, headers={}, params={}): - - url = "/%s/%s.json?%s" % (self.version, - path, - urllib.urlencode(params)) + def do_request(self, method, path, body=None, headers=None, params=None): + headers = headers or {} + params = params or {} + url = "/%s/%s.json" % (self.version, path) + if params: + url += "?%s" % urllib.urlencode(params) try: connection = self._get_connection() connection.request(method, url, body, headers) diff --git a/nova/tests/test_quantum.py b/nova/tests/test_quantum.py index 0b1a1f204..0feec9b99 100644 --- a/nova/tests/test_quantum.py +++ b/nova/tests/test_quantum.py @@ -277,10 +277,10 @@ class QuantumTestCaseBase(object): nw_info[1][0]['cidr_v6'].startswith("2001:1db9:")) # v6 address - self.assertTrue(\ + self.assertTrue( nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:")) - self.assertTrue(\ + self.assertTrue( nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:")) -- cgit From 435016f27ea36a6780897efe1289328c51e1463f Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 2 Sep 2011 12:31:14 -0700 Subject: move networks declarations within upgrade/downgrade methods --- nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py index e3ee6a85f..9db950c9b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py @@ -25,11 +25,11 @@ meta = MetaData() # Add priority column to networks table priority = Column('priority', Integer()) -networks = Table('networks', meta, autoload=True) def upgrade(migrate_engine): meta.bind = migrate_engine + networks = Table('networks', meta, autoload=True) try: networks.create_column(priority) except Exception: @@ -39,4 +39,6 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) networks.drop_column(priority) -- cgit From 9773d900d35316edbad4468a869ca62a353d3114 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Fri, 2 Sep 2011 12:34:14 -0700 Subject: Fix for LP Bug #839269 --- nova/db/api.py | 6 ++++-- nova/db/sqlalchemy/api.py | 5 +++-- nova/network/manager.py | 3 ++- nova/tests/test_network.py | 3 ++- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 148887635..efc088e35 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -324,13 +324,15 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): #################### -def fixed_ip_associate(context, address, instance_id, network_id=None): +def fixed_ip_associate(context, address, instance_id, network_id=None, + reserved=False): """Associate fixed ip to instance. Raises if fixed ip is not available. 
""" - return IMPL.fixed_ip_associate(context, address, instance_id, network_id) + return IMPL.fixed_ip_associate(context, address, instance_id, network_id, + reserved) def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b99667afc..e0be8454e 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -669,14 +669,15 @@ def floating_ip_update(context, address, values): @require_admin_context -def fixed_ip_associate(context, address, instance_id, network_id=None): +def fixed_ip_associate(context, address, instance_id, network_id=None, + reserved=False): session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = session.query(models.FixedIp).\ filter(network_or_none).\ - filter_by(reserved=False).\ + filter_by(reserved=reserved).\ filter_by(deleted=False).\ filter_by(address=address).\ with_lockmode('update').\ diff --git a/nova/network/manager.py b/nova/network/manager.py index e6b30d1a0..90f414ee2 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -991,7 +991,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): address = network['vpn_private_address'] self.db.fixed_ip_associate(context, address, - instance_id) + instance_id, + reserved=True) else: address = kwargs.get('address', None) if address: diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 25ff940f0..2347544de 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -264,7 +264,8 @@ class VlanNetworkTestCase(test.TestCase): db.fixed_ip_associate(mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn('192.168.0.1') + mox.IgnoreArg(), + reserved=True).AndReturn('192.168.0.1') db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) -- cgit From bcb6f7d570ed24e0bf083cd4f4c8be0f20e69918 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 2 Sep 2011 12:40:55 -0700 Subject: change db migrate script again to match other similar scripts --- .../db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py index 9db950c9b..b9b0ea37c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_add_network_priority.py @@ -22,6 +22,9 @@ from nova import utils meta = MetaData() +networks = Table('networks', meta, + Column("id", Integer(), primary_key=True, nullable=False)) + # Add priority column to networks table priority = Column('priority', Integer()) @@ -29,7 +32,6 @@ priority = Column('priority', Integer()) def upgrade(migrate_engine): meta.bind = migrate_engine - networks = Table('networks', meta, autoload=True) try: networks.create_column(priority) except Exception: @@ -39,6 +41,4 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): meta.bind = migrate_engine - - networks = Table('networks', meta, autoload=True) networks.drop_column(priority) -- cgit From cc3bd1da5edc368871d2c8de0e498ab2649ae0dd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 2 Sep 2011 12:52:02 -0700 Subject: revert description changes, use metadata['description'] if it is set to populate field in db --- nova/api/openstack/create_instance_helper.py | 9 
++++++--- nova/api/openstack/schemas/v1.1/server.rng | 1 - nova/api/openstack/servers.py | 6 +----- nova/api/openstack/views/servers.py | 1 - nova/tests/api/openstack/test_servers.py | 28 ---------------------------- 5 files changed, 7 insertions(+), 38 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index ff3be4a01..289f87921 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -161,6 +161,10 @@ class CreateInstanceHelper(object): 'config_drive': config_drive, 'password': password} + # NOTE(vish): This is solely for compatibility with anything + # that is using the display description field. + metadata = server_dict.get('metadata') or {} + display_description = metadata.get('description') or '' return (extra_values, create_method(context, inst_type, @@ -168,10 +172,9 @@ class CreateInstanceHelper(object): kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, - display_description=server_dict.\ - get('description', ''), + display_description=display_description, key_name=key_name, - metadata=server_dict.get('metadata', {}), + metadata=metadata, access_ip_v4=server_dict.get('accessIPv4'), access_ip_v6=server_dict.get('accessIPv6'), injected_files=injected_files, diff --git a/nova/api/openstack/schemas/v1.1/server.rng b/nova/api/openstack/schemas/v1.1/server.rng index 203728f48..ef835e408 100644 --- a/nova/api/openstack/schemas/v1.1/server.rng +++ b/nova/api/openstack/schemas/v1.1/server.rng @@ -3,7 +3,6 @@ - diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 7f5463e70..46a111cf5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -185,9 +185,6 @@ class Controller(object): self.helper._validate_server_name(name) update_dict['display_name'] = name.strip() - if 'description' in body['server']: - description = body['server']['description'] - update_dict['display_description'] = description.strip() if 'accessIPv4' in body['server']: access_ipv4 = body['server']['accessIPv4'] update_dict['access_ip_v4'] = access_ipv4.strip() @@ -881,7 +878,6 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): node.setAttribute('uuid', str(server['uuid'])) node.setAttribute('hostId', str(server['hostId'])) node.setAttribute('name', server['name']) - node.setAttribute('description', server['description']) node.setAttribute('created', str(server['created'])) node.setAttribute('updated', str(server['updated'])) node.setAttribute('status', server['status']) @@ -997,7 +993,7 @@ def create_resource(version='1.0'): "attributes": { "server": ["id", "imageId", "name", "flavorId", "hostId", "status", "progress", "adminPass", "flavorRef", - "imageRef", "userId", "tenantId", "description"], + "imageRef", "userId", "tenantId"], "link": ["rel", "type", "href"], }, "dict_collections": { diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 98ccd817a..ac09b5864 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -68,7 +68,6 @@ class ViewBuilder(object): 'name': inst['display_name'], 'user_id': inst.get('user_id', ''), 'tenant_id': inst.get('project_id', ''), - 'description': inst.get('display_description', ''), 'status': common.status_from_state(vm_state, task_state)} # Return the metadata as a dictionary diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index d5de1aa3c..45ad6e5a8 100644 --- 
a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -2268,34 +2268,6 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict['server']['id'], 1) self.assertEqual(res_dict['server']['name'], 'server_test') - def test_update_server_description_v1_1(self): - DESC = 'updated_desc' - - def server_update(context, id, params): - # assert that parameter conversion from description - # to display_description worked correctly - self.assertEqual(params.get('display_description'), DESC) - return stub_instance(1, - name='server_test', - description=params['display_description']) - - self.stubs.Set(nova.db.api, 'instance_get', - return_server_with_attributes(name='server_test', - description=DESC)) - - self.stubs.Set(nova.db.api, 'instance_update', - server_update) - - req = webob.Request.blank('/v1.1/fake/servers/1') - req.method = 'PUT' - req.content_type = 'application/json' - req.body = json.dumps({'server': {'description': DESC}}) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['id'], 1) - self.assertEqual(res_dict['server']['description'], DESC) - def test_update_server_access_ipv4_v1_1(self): self.stubs.Set(nova.db.api, 'instance_get', return_server_with_attributes(access_ipv4='0.0.0.0')) -- cgit From 752b6c9e26b718ab86f04c25a8c7f977bbea4a22 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 2 Sep 2011 13:05:24 -0700 Subject: feedback from jk0's review, including removing a lot of spaces from docstrings --- bin/nova-manage | 3 +- nova/db/sqlalchemy/api.py | 2 +- nova/network/manager.py | 2 +- nova/network/quantum/client.py | 45 +++++++++--------- nova/network/quantum/manager.py | 76 +++++++++++++++--------------- nova/network/quantum/melange_connection.py | 2 +- nova/network/quantum/melange_ipam_lib.py | 63 +++++++++++++------------ nova/network/quantum/nova_ipam_lib.py | 56 +++++++++++----------- nova/network/quantum/quantum_connection.py | 34 ++++++------- 9 files changed, 141 insertions(+), 142 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3a17818b2..6dd2920d1 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -779,8 +779,7 @@ class NetworkCommands(object): def list(self): """List all created networks""" - _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"\ - "\t%-15s\t%-15s" + _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" print _fmt % (_('id'), _('IPv4'), _('IPv6'), diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 09356e966..e0da2269d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -946,7 +946,7 @@ def virtual_interface_get_by_address(context, address): def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table. 
- :param vif_uuid: = the uuid of the interface you're looking to get + :param vif_uuid: the uuid of the interface you're looking to get """ session = get_session() vif_ref = session.query(models.VirtualInterface).\ diff --git a/nova/network/manager.py b/nova/network/manager.py index 426ff2f33..6730e808f 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -547,7 +547,7 @@ class NetworkManager(manager.SchedulerDependentManager): 'network_id': network_id, 'uuid': str(utils.gen_uuid())} # try FLAG times to create a vif record with a unique mac_address - for i in xrange(FLAGS.create_unique_mac_address_attempts): + for _ in xrange(FLAGS.create_unique_mac_address_attempts): try: return self.db.virtual_interface_create(context, vif) except exception.VirtualInterfaceCreateException: diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py index f4936695e..40c68dfdc 100644 --- a/nova/network/quantum/client.py +++ b/nova/network/quantum/client.py @@ -22,14 +22,14 @@ import socket import urllib -#FIXME(danwent): All content in this file should be removed once the +# FIXME(danwent): All content in this file should be removed once the # packaging work for the quantum client libraries is complete. # At that point, we will be able to just install the libraries as a # dependency and import from quantum.client.* and quantum.common.* # Until then, we have simplified versions of these classes in this file. class JSONSerializer(object): - """ This is a simple json-only serializer to use until we can just grab + """This is a simple json-only serializer to use until we can just grab the standard serializer from the quantum library. """ def serialize(self, data, content_type): @@ -47,17 +47,17 @@ class JSONSerializer(object): # granular exceptions, for now, just try to distinguish # between the cases we care about. class QuantumNotFoundException(Exception): - """ Indicates that Quantum Server returned 404""" + """Indicates that Quantum Server returned 404""" pass class QuantumServerException(Exception): - """ Indicates any non-404 error from Quantum Server""" + """Indicates any non-404 error from Quantum Server""" pass class QuantumIOException(Exception): - """ Indicates network IO trouble reaching Quantum Server""" + """Indicates network IO trouble reaching Quantum Server""" pass @@ -100,7 +100,7 @@ class Client(object): def __init__(self, host="127.0.0.1", port=9696, use_ssl=False, tenant=None, format="xml", testing_stub=None, key_file=None, cert_file=None, logger=None): - """ Creates a new client to some service. + """Creates a new client to some service. :param host: The host where service resides :param port: The port where service resides @@ -123,7 +123,7 @@ class Client(object): self.logger = logger def get_connection_type(self): - """ Returns the proper connection type """ + """Returns the proper connection type""" if self.testing_stub: return self.testing_stub elif self.use_ssl: @@ -133,7 +133,7 @@ class Client(object): def do_request(self, method, action, body=None, headers=None, params=None): - """ Connects to the server and issues a request. + """Connects to the server and issues a request. Returns the result data, or raises an appropriate exception if HTTP status code is not 2xx @@ -142,7 +142,6 @@ class Client(object): :param headers: mapping of key/value pairs to add as headers :param params: dictionary of key/value pairs to add to append to action - """ # Ensure we have a tenant id @@ -207,7 +206,7 @@ class Client(object): "server. 
Got error: %s" % e)) def get_status_code(self, response): - """ Returns the integer status code from the response, which + """Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): @@ -236,73 +235,73 @@ class Client(object): @api_call def list_networks(self): - """ Fetches a list of all networks for a tenant """ + """Fetches a list of all networks for a tenant""" return self.do_request("GET", self.networks_path) @api_call def show_network_details(self, network): - """ Fetches the details of a certain network """ + """Fetches the details of a certain network""" return self.do_request("GET", self.network_path % (network)) @api_call def create_network(self, body=None): - """ Creates a new network """ + """Creates a new network""" body = self.serialize(body) return self.do_request("POST", self.networks_path, body=body) @api_call def update_network(self, network, body=None): - """ Updates a network """ + """Updates a network""" body = self.serialize(body) return self.do_request("PUT", self.network_path % (network), body=body) @api_call def delete_network(self, network): - """ Deletes the specified network """ + """Deletes the specified network""" return self.do_request("DELETE", self.network_path % (network)) @api_call def list_ports(self, network): - """ Fetches a list of ports on a given network """ + """Fetches a list of ports on a given network""" return self.do_request("GET", self.ports_path % (network)) @api_call def show_port_details(self, network, port): - """ Fetches the details of a certain port """ + """Fetches the details of a certain port""" return self.do_request("GET", self.port_path % (network, port)) @api_call def create_port(self, network, body=None): - """ Creates a new port on a given network """ + """Creates a new port on a given network""" body = self.serialize(body) return self.do_request("POST", self.ports_path % (network), body=body) @api_call def delete_port(self, network, port): - """ Deletes the specified port from a network """ + """Deletes the specified port from a network""" return self.do_request("DELETE", self.port_path % (network, port)) @api_call def set_port_state(self, network, port, body=None): - """ Sets the state of the specified port """ + """Sets the state of the specified port""" body = self.serialize(body) return self.do_request("PUT", self.port_path % (network, port), body=body) @api_call def show_port_attachment(self, network, port): - """ Fetches the attachment-id associated with the specified port """ + """Fetches the attachment-id associated with the specified port""" return self.do_request("GET", self.attachment_path % (network, port)) @api_call def attach_resource(self, network, port, body=None): - """ Sets the attachment-id of the specified port """ + """Sets the attachment-id of the specified port""" body = self.serialize(body) return self.do_request("PUT", self.attachment_path % (network, port), body=body) @api_call def detach_resource(self, network, port): - """ Removes the attachment-id of the specified port """ + """Removes the attachment-id of the specified port""" return self.do_request("DELETE", self.attachment_path % (network, port)) diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py index c10dc90de..23a9aba0d 100644 --- a/nova/network/quantum/manager.py +++ b/nova/network/quantum/manager.py @@ -24,7 +24,7 @@ from nova.network import manager from nova.network.quantum import quantum_connection from 
nova import utils -LOG = logging.getLogger("quantum_manager") +LOG = logging.getLogger("nova.network.quantum.manager") FLAGS = flags.FLAGS @@ -34,26 +34,26 @@ flags.DEFINE_string('quantum_ipam_lib', class QuantumManager(manager.FlatManager): - """ NetworkManager class that communicates with a Quantum service - via a web services API to provision VM network connectivity. + """NetworkManager class that communicates with a Quantum service + via a web services API to provision VM network connectivity. - For IP Address management, QuantumManager can be configured to - use either Nova's local DB or the Melange IPAM service. + For IP Address management, QuantumManager can be configured to + use either Nova's local DB or the Melange IPAM service. - Currently, the QuantumManager does NOT support any of the 'gateway' - functionality implemented by the Nova VlanManager, including: + Currently, the QuantumManager does NOT support any of the 'gateway' + functionality implemented by the Nova VlanManager, including: * floating IPs * DHCP * NAT gateway - Support for these capabilities are targted for future releases. + Support for these capabilities are targted for future releases. """ def __init__(self, q_conn=None, ipam_lib=None, *args, **kwargs): - """ Initialize two key libraries, the connection to a - Quantum service, and the library for implementing IPAM. + """Initialize two key libraries, the connection to a + Quantum service, and the library for implementing IPAM. - Calls inherited FlatManager constructor. + Calls inherited FlatManager constructor. """ if not q_conn: @@ -70,16 +70,16 @@ class QuantumManager(manager.FlatManager): network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, uuid=None, **kwargs): - """ Unlike other NetworkManagers, with QuantumManager, each - create_networks calls should create only a single network. + """Unlike other NetworkManagers, with QuantumManager, each + create_networks calls should create only a single network. - Two scenarios exist: + Two scenarios exist: - no 'uuid' is specified, in which case we contact Quantum and create a new network. - an existing 'uuid' is specified, corresponding to a Quantum network created out of band. - In both cases, we initialize a subnet using the IPAM lib. + In both cases, we initialize a subnet using the IPAM lib. """ if num_networks != 1: raise Exception(_("QuantumManager requires that only one" @@ -101,8 +101,8 @@ class QuantumManager(manager.FlatManager): priority, cidr, gateway_v6, cidr_v6, dns1, dns2) def delete_network(self, context, fixed_range): - """ Lookup network by IPv4 cidr, delete both the IPAM - subnet and the corresponding Quantum network. + """Lookup network by IPv4 cidr, delete both the IPAM + subnet and the corresponding Quantum network. """ project_id = context.project_id quantum_net_id = self.ipam.get_network_id_by_cidr( @@ -113,14 +113,14 @@ class QuantumManager(manager.FlatManager): self.q_conn.delete_network(q_tenant_id, quantum_net_id) def allocate_for_instance(self, context, **kwargs): - """ Called by compute when it is creating a new VM. + """Called by compute when it is creating a new VM. - There are three key tasks: + There are three key tasks: - Determine the number and order of vNICs to create - Allocate IP addresses - Create ports on a Quantum network and attach vNICs. 
- We support two approaches to determining vNICs: + We support two approaches to determining vNICs: - By default, a VM gets a vNIC for any network belonging to the VM's project, and a vNIC for any "global" network that has a NULL project_id. vNIC order is determined @@ -130,10 +130,10 @@ class QuantumManager(manager.FlatManager): create vNICs, and the vNIC order is determiend by the order in the requested_networks array. - For each vNIC, use the FlatManager to create the entries - in the virtual_interfaces table, contact Quantum to - create a port and attachment the vNIC, and use the IPAM - lib to allocate IP addresses. + For each vNIC, use the FlatManager to create the entries + in the virtual_interfaces table, contact Quantum to + create a port and attachment the vNIC, and use the IPAM + lib to allocate IP addresses. """ instance_id = kwargs.pop('instance_id') instance_type_id = kwargs['instance_type_id'] @@ -181,17 +181,17 @@ class QuantumManager(manager.FlatManager): def get_instance_nw_info(self, context, instance_id, instance_type_id, host): - """ This method is used by compute to fetch all network data - that should be used when creating the VM. + """This method is used by compute to fetch all network data + that should be used when creating the VM. - The method simply loops through all virtual interfaces - stored in the nova DB and queries the IPAM lib to get - the associated IP data. + The method simply loops through all virtual interfaces + stored in the nova DB and queries the IPAM lib to get + the associated IP data. - The format of returned data is 'defined' by the initial - set of NetworkManagers found in nova/network/manager.py . - Ideally this 'interface' will be more formally defined - in the future. + The format of returned data is 'defined' by the initial + set of NetworkManagers found in nova/network/manager.py . + Ideally this 'interface' will be more formally defined + in the future. """ network_info = [] instance = db.instance_get(context, instance_id) @@ -269,10 +269,10 @@ class QuantumManager(manager.FlatManager): return network_info def deallocate_for_instance(self, context, **kwargs): - """ Called when a VM is terminated. Loop through each virtual - interface in the Nova DB and remove the Quantum port and - clear the IP allocation using the IPAM. Finally, remove - the virtual interfaces from the Nova DB. + """Called when a VM is terminated. Loop through each virtual + interface in the Nova DB and remove the Quantum port and + clear the IP allocation using the IPAM. Finally, remove + the virtual interfaces from the Nova DB. """ instance_id = kwargs.get('instance_id') project_id = kwargs.pop('project_id', None) @@ -309,7 +309,7 @@ class QuantumManager(manager.FlatManager): (instance_id))) def validate_networks(self, context, networks): - """ Validates that this tenant has quantum networks with the associated + """Validates that this tenant has quantum networks with the associated UUIDs. This is called by the 'os-create-server-ext' API extension code so that we can return an API error code to the caller if they request an invalid network. 
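The allocate_for_instance docstring above says that, by default, a VM gets a vNIC for each network owned by its project plus each "global" network with a NULL project_id, ordered by the network's 'priority' field (lowest integer value is highest priority). A standalone sketch of that selection and ordering, using made-up data shapes rather than the real IPAM query:

def order_networks_for_project(networks, project_id):
    # Keep this project's networks plus global (project-less) ones,
    # then sort ascending by priority so the lowest value comes first.
    candidates = [net for net in networks
                  if net['project_id'] in (project_id, None)]
    return sorted(candidates, key=lambda net: net['priority'])

networks = [{'uuid': 'net-a', 'project_id': 'proj1', 'priority': 2},
            {'uuid': 'net-b', 'project_id': None, 'priority': 1},
            {'uuid': 'net-c', 'project_id': 'proj2', 'priority': 3}]
ordered = order_networks_for_project(networks, 'proj1')
assert [net['uuid'] for net in ordered] == ['net-b', 'net-a']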
diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py index d5a1901aa..0c744f080 100644 --- a/nova/network/quantum/melange_connection.py +++ b/nova/network/quantum/melange_connection.py @@ -35,7 +35,7 @@ flags.DEFINE_string('melange_port', json_content_type = {'Content-type': "application/json"} -#FIXME(danwent): talk to the Melange folks about creating a +# FIXME(danwent): talk to the Melange folks about creating a # client lib that we can import as a library, instead of # have to have all of the client code in here. class MelangeConnection(object): diff --git a/nova/network/quantum/melange_ipam_lib.py b/nova/network/quantum/melange_ipam_lib.py index dcee0a3f9..a0ac10fd3 100644 --- a/nova/network/quantum/melange_ipam_lib.py +++ b/nova/network/quantum/melange_ipam_lib.py @@ -24,7 +24,7 @@ from nova import log as logging from nova.network.quantum import melange_connection -LOG = logging.getLogger("quantum_melange_ipam") +LOG = logging.getLogger("nova.network.quantum.melange_ipam_lib") FLAGS = flags.FLAGS @@ -34,26 +34,26 @@ def get_ipam_lib(net_man): class QuantumMelangeIPAMLib(object): - """ Implements Quantum IP Address Management (IPAM) interface - using the Melange service, which is access using the Melange - web services API. + """Implements Quantum IP Address Management (IPAM) interface + using the Melange service, which is access using the Melange + web services API. """ def __init__(self): - """ Initialize class used to connect to Melange server""" + """Initialize class used to connect to Melange server""" self.m_conn = melange_connection.MelangeConnection() def create_subnet(self, context, label, project_id, quantum_net_id, priority, cidr=None, gateway_v6=None, cidr_v6=None, dns1=None, dns2=None): - """ Contact Melange and create a subnet for any non-NULL - IPv4 or IPv6 subnets. + """Contact Melange and create a subnet for any non-NULL + IPv4 or IPv6 subnets. - Also create a entry in the Nova networks DB, but only - to store values not represented in Melange or to - temporarily provide compatibility with Nova code that - accesses IPAM data directly via the DB (e.g., nova-api) + Also create a entry in the Nova networks DB, but only + to store values not represented in Melange or to + temporarily provide compatibility with Nova code that + accesses IPAM data directly via the DB (e.g., nova-api) """ tenant_id = project_id or FLAGS.quantum_default_tenant_id if cidr: @@ -73,15 +73,15 @@ class QuantumMelangeIPAMLib(object): network = db.network_create_safe(admin_context, net) def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref): - """ Pass call to allocate fixed IP on to Melange""" + """Pass call to allocate fixed IP on to Melange""" tenant_id = project_id or FLAGS.quantum_default_tenant_id self.m_conn.allocate_ip(quantum_net_id, vif_ref['uuid'], project_id=tenant_id, mac_address=vif_ref['address']) def get_network_id_by_cidr(self, context, cidr, project_id): - """ Find the Quantum UUID associated with a IPv4 CIDR - address for the specified tenant. + """Find the Quantum UUID associated with a IPv4 CIDR + address for the specified tenant. 
""" tenant_id = project_id or FLAGS.quantum_default_tenant_id all_blocks = self.m_conn.get_blocks(tenant_id) @@ -91,8 +91,8 @@ class QuantumMelangeIPAMLib(object): raise exception.NotFound(_("No network found for cidr %s" % cidr)) def delete_subnets_by_net_id(self, context, net_id, project_id): - """ Find Melange block associated with the Quantum UUID, - then tell Melange to delete that block. + """Find Melange block associated with the Quantum UUID, + then tell Melange to delete that block. """ admin_context = context.elevated() tenant_id = project_id or FLAGS.quantum_default_tenant_id @@ -105,9 +105,10 @@ class QuantumMelangeIPAMLib(object): db.network_delete_safe(context, network['id']) def get_project_and_global_net_ids(self, context, project_id): - """ Fetches all networks associated with this project, or - that are "global" (i.e., have no project set). - Returns list sorted by 'priority'. + """Fetches all networks associated with this project, or + that are "global" (i.e., have no project set). + Returns list sorted by 'priority' (lowest integer value + is highest priority). """ if project_id is None: raise Exception(_("get_project_and_global_net_ids must be called" @@ -134,8 +135,8 @@ class QuantumMelangeIPAMLib(object): for priority, network_id, tenant_id in priority_nets] def get_subnets_by_net_id(self, context, project_id, net_id): - """ Returns information about the IPv4 and IPv6 subnets - associated with a Quantum Network UUID. + """Returns information about the IPv4 and IPv6 subnets + associated with a Quantum Network UUID. """ # FIXME(danwent): Melange actually returns the subnet info @@ -164,23 +165,23 @@ class QuantumMelangeIPAMLib(object): return (subnet_v4, subnet_v6) def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): - """ Returns a list of IPv4 address strings associated with - the specified virtual interface. + """Returns a list of IPv4 address strings associated with + the specified virtual interface. """ return self._get_ips_by_interface(context, net_id, vif_id, project_id, 4) def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): - """ Returns a list of IPv6 address strings associated with - the specified virtual interface. + """Returns a list of IPv6 address strings associated with + the specified virtual interface. """ return self._get_ips_by_interface(context, net_id, vif_id, project_id, 6) def _get_ips_by_interface(self, context, net_id, vif_id, project_id, ip_version): - """ Helper method to fetch v4 or v6 addresses for a particular - virtual interface. + """Helper method to fetch v4 or v6 addresses for a particular + virtual interface. """ tenant_id = project_id or FLAGS.quantum_default_tenant_id ip_list = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id) @@ -188,8 +189,8 @@ class QuantumMelangeIPAMLib(object): if IPNetwork(ip['address']).version == ip_version] def verify_subnet_exists(self, context, project_id, quantum_net_id): - """ Confirms that a subnet exists that is associated with the - specified Quantum Network UUID. + """Confirms that a subnet exists that is associated with the + specified Quantum Network UUID. """ tenant_id = project_id or FLAGS.quantum_default_tenant_id v4_subnet, v6_subnet = self.get_subnets_by_net_id(context, tenant_id, @@ -197,8 +198,8 @@ class QuantumMelangeIPAMLib(object): return v4_subnet is not None def deallocate_ips_by_vif(self, context, project_id, net_id, vif_ref): - """ Deallocate all fixed IPs associated with the specified - virtual interface. 
+ """Deallocate all fixed IPs associated with the specified + virtual interface. """ tenant_id = project_id or FLAGS.quantum_default_tenant_id self.m_conn.deallocate_ips(net_id, vif_ref['uuid'], tenant_id) diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py index 17236a976..21dee8f6a 100644 --- a/nova/network/quantum/nova_ipam_lib.py +++ b/nova/network/quantum/nova_ipam_lib.py @@ -27,7 +27,7 @@ from nova.network.quantum import melange_connection as melange from nova import utils -LOG = logging.getLogger("quantum_nova_ipam_lib") +LOG = logging.getLogger("nova.network.quantum.nova_ipam_lib") FLAGS = flags.FLAGS @@ -37,15 +37,15 @@ def get_ipam_lib(net_man): class QuantumNovaIPAMLib(object): - """ Implements Quantum IP Address Management (IPAM) interface - using the local Nova database. This implementation is inline - with how IPAM is used by other NetworkManagers. + """Implements Quantum IP Address Management (IPAM) interface + using the local Nova database. This implementation is inline + with how IPAM is used by other NetworkManagers. """ def __init__(self, net_manager): - """ Holds a reference to the "parent" network manager, used - to take advantage of various FlatManager methods to avoid - code duplication. + """Holds a reference to the "parent" network manager, used + to take advantage of various FlatManager methods to avoid + code duplication. """ self.net_manager = net_manager @@ -53,11 +53,11 @@ class QuantumNovaIPAMLib(object): quantum_net_id, priority, cidr=None, gateway_v6=None, cidr_v6=None, dns1=None, dns2=None): - """ Re-use the basic FlatManager create_networks method to - initialize the networks and fixed_ips tables in Nova DB. + """Re-use the basic FlatManager create_networks method to + initialize the networks and fixed_ips tables in Nova DB. - Also stores a few more fields in the networks table that - are needed by Quantum but not the FlatManager. + Also stores a few more fields in the networks table that + are needed by Quantum but not the FlatManager. """ admin_context = context.elevated() subnet_size = len(netaddr.IPNetwork(cidr)) @@ -85,8 +85,8 @@ class QuantumNovaIPAMLib(object): return network['uuid'] def delete_subnets_by_net_id(self, context, net_id, project_id): - """ Deletes a network based on Quantum UUID. Uses FlatManager - delete_network to avoid duplication. + """Deletes a network based on Quantum UUID. Uses FlatManager + delete_network to avoid duplication. """ admin_context = context.elevated() network = db.network_get_by_uuid(admin_context, net_id) @@ -97,9 +97,9 @@ class QuantumNovaIPAMLib(object): require_disassociated=False) def get_project_and_global_net_ids(self, context, project_id): - """ Fetches all networks associated with this project, or - that are "global" (i.e., have no project set). - Returns list sorted by 'priority'. + """Fetches all networks associated with this project, or + that are "global" (i.e., have no project set). + Returns list sorted by 'priority'. 
""" admin_context = context.elevated() networks = db.project_get_networks(admin_context, project_id, False) @@ -113,7 +113,7 @@ class QuantumNovaIPAMLib(object): return sorted(net_list, key=lambda x: id_priority_map[x[0]]) def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec): - """ Allocates a single fixed IPv4 address for a virtual interface.""" + """Allocates a single fixed IPv4 address for a virtual interface.""" admin_context = context.elevated() network = db.network_get_by_uuid(admin_context, quantum_net_id) if network['cidr']: @@ -125,8 +125,8 @@ class QuantumNovaIPAMLib(object): db.fixed_ip_update(admin_context, address, values) def get_subnets_by_net_id(self, context, tenant_id, net_id): - """ Returns information about the IPv4 and IPv6 subnets - associated with a Quantum Network UUID. + """Returns information about the IPv4 and IPv6 subnets + associated with a Quantum Network UUID. """ n = db.network_get_by_uuid(context.elevated(), net_id) subnet_data_v4 = { @@ -148,8 +148,8 @@ class QuantumNovaIPAMLib(object): return (subnet_data_v4, subnet_data_v6) def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id): - """ Returns a list of IPv4 address strings associated with - the specified virtual interface, based on the fixed_ips table. + """Returns a list of IPv4 address strings associated with + the specified virtual interface, based on the fixed_ips table. """ vif_rec = db.virtual_interface_get_by_uuid(context, vif_id) fixed_ips = db.fixed_ip_get_by_virtual_interface(context, @@ -157,8 +157,8 @@ class QuantumNovaIPAMLib(object): return [fixed_ip['address'] for fixed_ip in fixed_ips] def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id): - """ Returns a list containing a single IPv6 address strings - associated with the specified virtual interface. + """Returns a list containing a single IPv6 address strings + associated with the specified virtual interface. """ admin_context = context.elevated() network = db.network_get_by_uuid(admin_context, net_id) @@ -171,16 +171,16 @@ class QuantumNovaIPAMLib(object): return [] def verify_subnet_exists(self, context, tenant_id, quantum_net_id): - """ Confirms that a subnet exists that is associated with the - specified Quantum Network UUID. Raises an exception if no - such subnet exists. + """Confirms that a subnet exists that is associated with the + specified Quantum Network UUID. Raises an exception if no + such subnet exists. """ admin_context = context.elevated() db.network_get_by_uuid(admin_context, quantum_net_id) def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref): - """ Deallocate all fixed IPs associated with the specified - virtual interface. + """Deallocate all fixed IPs associated with the specified + virtual interface. 
""" try: admin_context = context.elevated() diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py index 93892a843..21917653c 100644 --- a/nova/network/quantum/quantum_connection.py +++ b/nova/network/quantum/quantum_connection.py @@ -21,7 +21,7 @@ from nova.network.quantum import client as quantum_client from nova import utils -LOG = logging.getLogger("nova.network.quantum") +LOG = logging.getLogger("nova.network.quantum.quantum_connection") FLAGS = flags.FLAGS flags.DEFINE_string('quantum_connection_host', @@ -38,35 +38,35 @@ flags.DEFINE_string('quantum_default_tenant_id', class QuantumClientConnection(object): - """ Abstracts connection to Quantum service into higher level - operations performed by the QuantumManager. + """Abstracts connection to Quantum service into higher level + operations performed by the QuantumManager. - Separating this out as a class also let's us create a 'fake' - version of this class for unit tests. + Separating this out as a class also let's us create a 'fake' + version of this class for unit tests. """ def __init__(self): - """ Initialize Quantum client class based on flags. """ + """Initialize Quantum client class based on flags.""" self.client = quantum_client.Client(FLAGS.quantum_connection_host, FLAGS.quantum_connection_port, format="json", logger=LOG) def create_network(self, tenant_id, network_name): - """ Create network using specified name, return Quantum - network UUID. + """Create network using specified name, return Quantum + network UUID. """ data = {'network': {'name': network_name}} resdict = self.client.create_network(data, tenant=tenant_id) return resdict["network"]["id"] def delete_network(self, tenant_id, net_id): - """ Deletes Quantum network with specified UUID. """ + """Deletes Quantum network with specified UUID.""" self.client.delete_network(net_id, tenant=tenant_id) def network_exists(self, tenant_id, net_id): - """ Determine if a Quantum network exists for the - specified tenant. + """Determine if a Quantum network exists for the + specified tenant. """ try: self.client.show_network_details(net_id, tenant=tenant_id) @@ -76,9 +76,9 @@ class QuantumClientConnection(object): return False def create_and_attach_port(self, tenant_id, net_id, interface_id): - """ Creates a Quantum port on the specified network, sets - status to ACTIVE to enable traffic, and attaches the - vNIC with the specified interface-id. + """Creates a Quantum port on the specified network, sets + status to ACTIVE to enable traffic, and attaches the + vNIC with the specified interface-id. """ LOG.debug(_("Connecting interface %(interface_id)s to " "net %(net_id)s for %(tenant_id)s" % locals())) @@ -91,7 +91,7 @@ class QuantumClientConnection(object): tenant=tenant_id) def detach_and_delete_port(self, tenant_id, net_id, port_id): - """ Detach and delete the specified Quantum port. """ + """Detach and delete the specified Quantum port.""" LOG.debug(_("Deleting port %(port_id)s on net %(net_id)s" " for %(tenant_id)s" % locals())) @@ -99,8 +99,8 @@ class QuantumClientConnection(object): self.client.delete_port(net_id, port_id, tenant=tenant_id) def get_port_by_attachment(self, tenant_id, attachment_id): - """ Given a tenant, search for the Quantum network and port - UUID that has the specified interface-id attachment. + """Given a tenant, search for the Quantum network and port + UUID that has the specified interface-id attachment. 
""" # FIXME(danwent): this will be inefficient until the Quantum # API implements querying a port by the interface-id -- cgit From e5e3b306985a3b1fdd8a971f48b76eaf8f923f21 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 2 Sep 2011 13:24:38 -0700 Subject: fix pep8 violation --- nova/network/quantum/melange_connection.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py index 0c744f080..71ac9b5f1 100644 --- a/nova/network/quantum/melange_connection.py +++ b/nova/network/quantum/melange_connection.py @@ -35,6 +35,7 @@ flags.DEFINE_string('melange_port', json_content_type = {'Content-type': "application/json"} + # FIXME(danwent): talk to the Melange folks about creating a # client lib that we can import as a library, instead of # have to have all of the client code in here. -- cgit From fc0ee0d01320d81b5bb6cd1bc6cb23c90c8246a7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 2 Sep 2011 13:24:40 -0700 Subject: remove extra description stuff --- .../api/openstack/contrib/test_createserverext.py | 1 - nova/tests/api/openstack/test_servers.py | 19 +------------------ 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index a6da9abfd..0881efcfe 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -98,7 +98,6 @@ class CreateserverextTest(test.TestCase): 'uuid': FAKE_UUID, 'user_id': 'fake', 'project_id': 'fake', - 'display_description': 'fakedescription', 'created_at': "", 'updated_at': ""}] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 45ad6e5a8..a716af0e5 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -156,7 +156,6 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", flavor_id="1", interfaces=None, name=None, key_name='', - description='fakedescription', access_ipv4=None, access_ipv6=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -211,7 +210,7 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": server_name, - "display_description": description, + "display_description": "", "locked": False, "metadata": metadata, "access_ip_v4": access_ipv4, @@ -354,7 +353,6 @@ class ServersTest(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "server1", - "description": "fakedescription", "status": "BUILD", "accessIPv4": "", "accessIPv6": "", @@ -455,7 +453,6 @@ class ServersTest(test.TestCase): xmlns="http://docs.openstack.org/compute/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" name="server1" - description="fakedescription" updated="%(expected_updated)s" created="%(expected_created)s" hostId="" @@ -528,7 +525,6 @@ class ServersTest(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 100, "name": "server1", - "description": "fakedescription", "status": "ACTIVE", "accessIPv4": "", "accessIPv6": "", @@ -626,7 +622,6 @@ class ServersTest(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 100, "name": "server1", - "description": "fakedescription", "status": "ACTIVE", "accessIPv4": "", "accessIPv6": 
"", @@ -1488,7 +1483,6 @@ class ServersTest(test.TestCase): 'access_ip_v4': '1.2.3.4', 'access_ip_v6': 'fead::1234', 'image_ref': image_ref, - 'display_description': 'fakedescription', 'user_id': 'fake', 'project_id': 'fake', "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), @@ -3368,7 +3362,6 @@ class TestServerInstanceCreation(test.TestCase): return [{'id': '1234', 'display_name': 'fakeinstance', 'user_id': 'fake', 'project_id': 'fake', - 'display_description': 'fakedescription', 'uuid': FAKE_UUID}] def set_admin_password(self, *args, **kwargs): @@ -3686,7 +3679,6 @@ class ServersViewBuilderV11Test(test.TestCase): "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": "test_server", - "display_description": "fakedescription", "locked": False, "metadata": [], "accessIPv4": "1.2.3.4", @@ -3775,7 +3767,6 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", - "description": "fakedescription", "status": "BUILD", "accessIPv4": "", "accessIPv6": "", @@ -3833,7 +3824,6 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 100, "name": "test_server", - "description": "fakedescription", "status": "ACTIVE", "accessIPv4": "", "accessIPv6": "", @@ -4011,7 +4001,6 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", - "description": "fakedescription", "status": "BUILD", "accessIPv4": "", "accessIPv6": "", @@ -4083,7 +4072,6 @@ class ServerXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, "progress": 0, "name": "test_server", - "description": "fakedescription", "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', "key_name": '', @@ -4222,7 +4210,6 @@ class ServerXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, "progress": 0, "name": "test_server", - "description": "fakedescription", "status": "BUILD", "accessIPv4": "1.2.3.4", "accessIPv6": "fead::1234", @@ -4425,7 +4412,6 @@ class ServerXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, "progress": 0, "name": "test_server", - "description": "fakedescription", "status": "BUILD", "accessIPv4": "1.2.3.4", "accessIPv6": "fead::1234", @@ -4483,7 +4469,6 @@ class ServerXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, "progress": 100, "name": "test_server_2", - "description": "fakedescription", "status": "ACTIVE", "accessIPv4": "1.2.3.4", "accessIPv6": "fead::1234", @@ -4606,7 +4591,6 @@ class ServerXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, "progress": 0, "name": "test_server", - "description": "fakedescription", "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', "accessIPv4": "1.2.3.4", @@ -4744,7 +4728,6 @@ class ServerXMLSerializationTest(test.TestCase): 'updated': self.TIMESTAMP, "progress": 0, "name": "test_server", - "description": "fakedescription", "status": "BUILD", "accessIPv4": "1.2.3.4", "accessIPv6": "fead::1234", -- cgit From dd7aa0234080d5c3512d0e9bab831a621aac10aa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 2 Sep 2011 16:30:52 -0400 Subject: use 'qemu-image resize' rather than 'truncate' to grow image files qcow-image is capable of growing qcow formated disks in addition to being able to grow a qcow formated one. 
(LP: #836759) --- nova/virt/disk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 52b2881e8..50c7c40e9 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -58,7 +58,7 @@ def extend(image, size): file_size = os.path.getsize(image) if file_size >= size: return - utils.execute('truncate', '-s', size, image) + utils.execute('qemu-img', 'resize', image, size) # NOTE(vish): attempts to resize filesystem utils.execute('e2fsck', '-fp', image, check_exit_code=False) utils.execute('resize2fs', image, check_exit_code=False) -- cgit From 6eb28b5748a829d058fd35888f03f2ee1f26f5b5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 2 Sep 2011 13:31:19 -0700 Subject: default description to name --- nova/api/openstack/create_instance_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 289f87921..9b2928bc8 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -164,7 +164,7 @@ class CreateInstanceHelper(object): # NOTE(vish): This is solely for compatibility with anything # that is using the display description field. metadata = server_dict.get('metadata') or {} - display_description = metadata.get('description') or '' + display_description = metadata.get('description') or name return (extra_values, create_method(context, inst_type, -- cgit From 53f1aafc546b165cfdd74aa6620fe4c288a9359a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 2 Sep 2011 17:28:58 -0500 Subject: alex meade issues --- nova/tests/fake_network.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 2e8cf60ec..36aebe6b0 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -34,31 +34,31 @@ class FakeModel(dict): return self[name] -def fake_network(n, ipv6=None): +def fake_network(network_id, ipv6=None): if ipv6 == None: ipv6 = FLAGS.use_ipv6 - rval = {'id': n, - 'label': 'test%d' % n, + rval = {'id': network_id, + 'label': 'test%d' % network_id, 'injected': False, 'multi_host': False, - 'cidr': '192.168.%d.0/24' % n, + 'cidr': '192.168.%d.0/24' % network_id, 'cidr_v6': None, 'netmask': '255.255.255.0', 'netmask_v6': None, - 'bridge': 'fake_br%d' % n, - 'bridge_interface': 'fake_eth%d' % n, - 'gateway': '192.168.%d.1' % n, + 'bridge': 'fake_br%d' % network_id, + 'bridge_interface': 'fake_eth%d' % network_id, + 'gateway': '192.168.%d.1' % network_id, 'gateway_v6': None, - 'broadcast': '192.168.%d.255' % n, - 'dns1': '192.168.%d.3' % n, - 'dns2': '192.168.%d.4' % n, + 'broadcast': '192.168.%d.255' % network_id, + 'dns1': '192.168.%d.3' % network_id, + 'dns2': '192.168.%d.4' % network_id, 'vlan': None, 'host': None, 'project_id': 'fake_project', - 'vpn_public_address': '192.168.%d.2' % n} + 'vpn_public_address': '192.168.%d.2' % network_id} if ipv6: - rval['cidr_v6'] = '2001:db8:0:%x::/64' % n - rval['gateway_v6'] = '2001:db8:0:%x::1' % n + rval['cidr_v6'] = '2001:db8:0:%x::/64' % network_id + rval['gateway_v6'] = '2001:db8:0:%x::1' % network_id rval['netmask_v6'] = '64' return rval @@ -109,11 +109,11 @@ def vifs(n): 'instance_id': 0} -def ipv4_like(ip, s): +def ipv4_like(ip, match_string): ip = ip.split('.') - s = s.split('.') + match_octets = match_string.split('.') - for i, octet in enumerate(s): + for i, octet in enumerate(match_octets): if 
octet == '*': continue if octet != ip[i]: @@ -121,7 +121,7 @@ def ipv4_like(ip, s): return True -def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): +def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2): # stubs is the self.stubs from the test # ips_per_vif is the number of ips each vif will have # num_floating_ips is number of float ips for each fixed ip @@ -129,10 +129,10 @@ def fake_get_instance_nw_info(stubs, n=1, ips_per_vif=2): network.db = db def fixed_ips_fake(*args, **kwargs): - return list(fixed_ips(n, ips_per_vif)) + return list(fixed_ips(num_networks, ips_per_vif)) def virtual_interfaces_fake(*args, **kwargs): - return [vif for vif in vifs(n)] + return [vif for vif in vifs(num_networks)] def instance_type_fake(*args, **kwargs): return flavor -- cgit From cfdc4642bfa3d96e2335079d187945b9ca4c0141 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 2 Sep 2011 18:00:34 -0500 Subject: rick nits --- nova/tests/fake_network.py | 68 ++++++++++++++++++++++++---------------------- 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 36aebe6b0..10565f83c 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import itertools from nova import db from nova import flags @@ -35,48 +36,49 @@ class FakeModel(dict): def fake_network(network_id, ipv6=None): - if ipv6 == None: + if ipv6 is None: ipv6 = FLAGS.use_ipv6 - rval = {'id': network_id, - 'label': 'test%d' % network_id, - 'injected': False, - 'multi_host': False, - 'cidr': '192.168.%d.0/24' % network_id, - 'cidr_v6': None, - 'netmask': '255.255.255.0', - 'netmask_v6': None, - 'bridge': 'fake_br%d' % network_id, - 'bridge_interface': 'fake_eth%d' % network_id, - 'gateway': '192.168.%d.1' % network_id, - 'gateway_v6': None, - 'broadcast': '192.168.%d.255' % network_id, - 'dns1': '192.168.%d.3' % network_id, - 'dns2': '192.168.%d.4' % network_id, - 'vlan': None, - 'host': None, - 'project_id': 'fake_project', - 'vpn_public_address': '192.168.%d.2' % network_id} + fake_network = {'id': network_id, + 'label': 'test%d' % network_id, + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.%d.0/24' % network_id, + 'cidr_v6': None, + 'netmask': '255.255.255.0', + 'netmask_v6': None, + 'bridge': 'fake_br%d' % network_id, + 'bridge_interface': 'fake_eth%d' % network_id, + 'gateway': '192.168.%d.1' % network_id, + 'gateway_v6': None, + 'broadcast': '192.168.%d.255' % network_id, + 'dns1': '192.168.%d.3' % network_id, + 'dns2': '192.168.%d.4' % network_id, + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.%d.2' % network_id} if ipv6: - rval['cidr_v6'] = '2001:db8:0:%x::/64' % network_id - rval['gateway_v6'] = '2001:db8:0:%x::1' % network_id - rval['netmask_v6'] = '64' + fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id + fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id + fake_network['netmask_v6'] = '64' - return rval + return fake_network def fixed_ips(num_networks, num_ips, num_floating_ips=0): - for network in xrange(num_networks): - for ip in xrange(num_ips): - id = network * num_ips + ip - f_ips = [floating_ips(id).next() for i in xrange(num_floating_ips)] - yield {'id': id, - 'network_id': network, - 'address': '192.168.%d.1%02d' % (network, ip), + for network_index in 
xrange(num_networks): + for ip_index in xrange(num_ips): + fixed_ip_id = network_index * num_ips + ip_index + f_ips = [FakeModel(**floating_ips(fixed_ip_id).next()) + for i in xrange(num_floating_ips)] + yield {'id': fixed_ip_id, + 'network_id': network_index, + 'address': '192.168.%d.1%02d' % (network_index, ip_index), 'instance_id': 0, 'allocated': False, # and since network_id and vif_id happen to be equivalent - 'virtual_interface_id': network, - 'floating_ips': [FakeModel(**ip) for ip in f_ips]} + 'virtual_interface_id': network_index, + 'floating_ips': f_ips} flavor = {'id': 0, -- cgit From f970ce0fbb3d4de560f73a01b508d8f0f7ac9117 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 2 Sep 2011 18:02:41 -0500 Subject: removed unneeded import --- nova/tests/fake_network.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 10565f83c..73a117c33 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -14,7 +14,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import itertools from nova import db from nova import flags -- cgit From 494eb94192a971f64fa6aa78092074f8ed437a7f Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Fri, 2 Sep 2011 17:09:09 -0700 Subject: Added unit tests to check instance record is not inserted in db when security groups passed to the instances are not existing --- nova/tests/test_compute.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 766a7da9b..65fdffbd6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -161,6 +161,19 @@ class ComputeTestCase(test.TestCase): db.security_group_destroy(self.context, group['id']) db.instance_destroy(self.context, ref[0]['id']) + def test_create_instance_with_invalid_security_group_raises(self): + instance_type = instance_types.get_default_instance_type() + + pre_build_len = len(db.instance_get_all(context.get_admin_context())) + self.assertRaises(exception.SecurityGroupNotFoundForProject, + self.compute_api.create, + self.context, + instance_type=instance_type, + image_href=None, + security_group=['this_is_a_fake_sec_group']) + self.assertEqual(pre_build_len, + len(db.instance_get_all(context.get_admin_context()))) + def test_create_instance_associates_config_drive(self): """Make sure create associates a config drive.""" -- cgit From 2abdd4c2fab39b6373a1562fb260168e515e43e9 Mon Sep 17 00:00:00 2001 From: Launchpad Translations on behalf of nova-core <> Date: Sat, 3 Sep 2011 05:50:53 +0000 Subject: Launchpad automatic translations update. 
--- po/pt_BR.po | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/po/pt_BR.po b/po/pt_BR.po index c4eb487d6..48a718808 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -8,13 +8,13 @@ msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME \n" "POT-Creation-Date: 2011-02-21 10:03-0500\n" -"PO-Revision-Date: 2011-09-01 20:56+0000\n" +"PO-Revision-Date: 2011-09-02 12:17+0000\n" "Last-Translator: Robson Negreiros Bezerra \n" "Language-Team: Brazilian Portuguese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-09-02 04:53+0000\n" +"X-Launchpad-Export-Date: 2011-09-03 05:50+0000\n" "X-Generator: Launchpad (build 13830)\n" #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 @@ -882,58 +882,59 @@ msgstr "" #: ../nova/virt/xenapi/vm_utils.py:590 #, python-format msgid "No VDIs found for VM %s" -msgstr "" +msgstr "Nenhum VDIs encontrado para MV %s" #: ../nova/virt/xenapi/vm_utils.py:594 #, python-format msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" +"Número de VDIs inesperado (%(num_vdis)s) encontrado para MV %(vm_ref)s" #: ../nova/virt/xenapi/vm_utils.py:653 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format msgid "Creating VBD for VDI %s ... " -msgstr "" +msgstr "Criando VBD para VDI %s ... " #: ../nova/virt/xenapi/vm_utils.py:655 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format msgid "Creating VBD for VDI %s done." -msgstr "" +msgstr "O VBD para VDI %s foi criado." #: ../nova/virt/xenapi/vm_utils.py:657 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format msgid "Plugging VBD %s ... " -msgstr "" +msgstr "Conectando VBD %s ... " #: ../nova/virt/xenapi/vm_utils.py:659 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format msgid "Plugging VBD %s done." -msgstr "" +msgstr "O VDB %s foi conectado." #: ../nova/virt/xenapi/vm_utils.py:661 #, python-format msgid "VBD %(vbd)s plugged as %(orig_dev)s" -msgstr "" +msgstr "VBD %(vbd)s conectado como %(orig_dev)s" #: ../nova/virt/xenapi/vm_utils.py:664 #, python-format msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" -msgstr "" +msgstr "VBD %(vbd)s conectado no device errado, remapeando para %(dev)s" #: ../nova/virt/xenapi/vm_utils.py:668 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format msgid "Destroying VBD for VDI %s ... " -msgstr "" +msgstr "Destruindo VBD para o VDI %s ... " #: ../nova/virt/xenapi/vm_utils.py:671 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format msgid "Destroying VBD for VDI %s done." -msgstr "" +msgstr "O VBD para o VDI %s foi destruído." #: ../nova/virt/xenapi/vm_utils.py:683 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 @@ -954,7 +955,7 @@ msgstr "" #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" -msgstr "" +msgstr "Ignorando XenAPI.Failure em VBD.unplug: %s" #: ../nova/virt/xenapi/vm_utils.py:704 #: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 -- cgit From 864f3b24c6cd0753474ac152bce73c2df64bbdd1 Mon Sep 17 00:00:00 2001 From: Thuleau Édouard Date: Sat, 3 Sep 2011 09:36:06 +0200 Subject: Change non E ascii characte. 
--- Authors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Authors b/Authors index 0beab16a4..f647a7c00 100644 --- a/Authors +++ b/Authors @@ -30,7 +30,7 @@ Devendra Modium Devin Carlen Donal Lafferty Ed Leafe -Édouard Thuleau +Edouard Thuleau Eldar Nugaev Eric Day Eric Windisch -- cgit From 80059b3e87f6ce7ab2ba18a135e5c469d2be8f88 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Sun, 4 Sep 2011 01:19:21 -0500 Subject: correct floating ip id to increment in fake_network --- nova/tests/fake_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 73a117c33..99b027cf3 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -93,7 +93,7 @@ flavor = {'id': 0, def floating_ips(fixed_ip_id): for i in xrange(154): - yield {'id': 0, + yield {'id': i, 'address': '10.10.10.%d' % (i + 100), 'fixed_ip_id': fixed_ip_id, 'project_id': None, -- cgit From 9b35957eef001ae1c3329e9197984d3aca0da787 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sun, 4 Sep 2011 05:39:21 -0400 Subject: Fixing xml serialization of limits resource. --- nova/api/openstack/limits.py | 24 +++++++++++------------ nova/tests/api/openstack/test_limits.py | 34 ++++++++++++++++----------------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 2896ac396..f6df94eea 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -87,25 +87,25 @@ class LimitsXMLSerializer(wsgi.XMLDictSerializer): def __init__(self): pass - def _create_rates_node(self, rates_dict): + def _create_rates_node(self, rates): rates_elem = etree.Element('rates', nsmap=self.NSMAP) - for rate in rates_dict.items(): + for rate in rates: rate_node = etree.SubElement(rates_elem, 'rate') rate_node.set('uri', rate['uri']) rate_node.set('regex', rate['regex']) - for limit in rate['limits']: + for limit in rate['limit']: limit_elem = etree.SubElement(rate_node, 'limit') - limit_elem.set('value', str(rate['value'])) - limit_elem.set('verb', str(rate['verb'])) - limit_elem.set('remaining', str(rate['remaining'])) - limit_elem.set('unit', str(rate['unit'])) - limit_elem.set('next-available', str(rate['next-available'])) + limit_elem.set('value', str(limit['value'])) + limit_elem.set('verb', str(limit['verb'])) + limit_elem.set('remaining', str(limit['remaining'])) + limit_elem.set('unit', str(limit['unit'])) + limit_elem.set('next-available', str(limit['next-available'])) return rates_elem def _create_absolute_node(self, absolute_dict): absolute_elem = etree.Element('absolute', nsmap=self.NSMAP) for key, value in absolute_dict.items(): - limit_elem = etree.SubElement(rate_node, 'limit') + limit_elem = etree.SubElement(absolute_elem, 'limit') limit_elem.set('name', str(key)) limit_elem.set('value', str(value)) return absolute_elem @@ -114,16 +114,16 @@ class LimitsXMLSerializer(wsgi.XMLDictSerializer): """Populate a limits xml element from a dict.""" rates_elem = self._create_rates_node( - limits_dict.get('rates', {})) + limits_dict.get('rate', [])) limits_elem.append(rates_elem) absolutes_elem = self._create_absolute_node( - limits_dict.get('absolutes', {})) + limits_dict.get('absolute', {})) limits_elem.append(absolutes_elem) def index(self, limits_dict): limits = etree.Element('limits', nsmap=self.NSMAP) - self._populate_limits(limits, limits_dict) + self._populate_limits(limits, limits_dict['limits']) return self._to_xml(limits) diff --git 
a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index f71d9c454..3db57ee86 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -41,8 +41,10 @@ TEST_LIMITS = [ limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), limits.Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE), ] -NS = "{http://docs.openstack.org/compute/api/v1.1}" -ATOMNS = "{http://www.w3.org/2005/Atom}" +NS = { + 'atom': 'http://www.w3.org/2005/Atom', + 'ns': 'http://docs.openstack.org/compute/api/v1.1' +} class BaseLimitTestSuite(unittest.TestCase): @@ -998,7 +1000,8 @@ class LimitsXMLSerializationTest(test.TestCase): def test_index(self): serializer = limits.LimitsXMLSerializer() - fixture = {"limits": { + fixture = { + "limits": { "rate": [{ "uri": "*", "regex": ".*", @@ -1027,28 +1030,27 @@ class LimitsXMLSerializationTest(test.TestCase): xmlutil.validate_schema(root, 'limits') #verify absolute limits - absolute = root.find('{0}absolute'.format(NS)) - absolutes = absolute.findall('limit'.format(NS)) + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) + self.assertEqual(len(absolutes), 4) for limit in absolutes: name = limit.get('name') value = limit.get('value') self.assertEqual(value, str(fixture['limits']['absolute'][name])) #verify rate limits - rate_root = root.find('{0}rates'.format(NS)) - rates = rate_root.findall('{0}rate'.format(NS)) - for i in range(len(rates)): - rate = rates[i] + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) + self.assertEqual(len(rates), 2) + for i, rate in enumerate(rates): for key in ['uri', 'regex']: self.assertEqual(rate.get(key), str(fixture['limits']['rate'][i][key])) - rate_limits = rate.findall('{0}limit'.format(NS)) - for z in range(len(rate_limits)): - limit = rate_limits[z] + rate_limits = rate.xpath('ns:limit', namespaces=NS) + self.assertEqual(len(rate_limits), 1) + for j, limit in enumerate(rate_limits): for key in ['verb', 'value', 'remaining', 'unit', 'next-available']: self.assertEqual(limit.get(key), - str(fixture['limits']['rate'][i]['limit'][z][key])) + str(fixture['limits']['rate'][i]['limit'][j][key])) def test_index_no_limits(self): serializer = limits.LimitsXMLSerializer() @@ -1063,11 +1065,9 @@ class LimitsXMLSerializationTest(test.TestCase): xmlutil.validate_schema(root, 'limits') #verify absolute limits - absolute = root.find('{0}absolute'.format(NS)) - absolutes = absolute.findall('limit'.format(NS)) + absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS) self.assertEqual(len(absolutes), 0) #verify rate limits - rate_root = root.find('{0}rates'.format(NS)) - rates = rate_root.findall('{0}rate'.format(NS)) + rates = root.xpath('ns:rates/ns:rate', namespaces=NS) self.assertEqual(len(rates), 0) -- cgit From d155f4224cb97e43cacd4102ba01f0e1775dfbdf Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Sun, 4 Sep 2011 05:39:59 -0400 Subject: Minor cleanup. 
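Both this limits test and the versions test cleanup that follows rely on lxml's namespace-aware XPath rather than ElementTree-style find() calls with Clark notation. A minimal standalone sketch of the pattern, using an invented document and assuming only lxml is installed:

    from lxml import etree

    NS = {'ns': 'http://docs.openstack.org/compute/api/v1.1'}
    xml = ('<limits xmlns="http://docs.openstack.org/compute/api/v1.1">'
           '<absolute><limit name="maxServers" value="10"/></absolute>'
           '</limits>')
    root = etree.fromstring(xml)
    # Prefixes in the XPath expression are resolved through the namespaces
    # mapping, so the document's default namespace does not have to be
    # spelled out element by element.
    found = root.xpath('ns:absolute/ns:limit', namespaces=NS)
    assert found[0].get('name') == 'maxServers'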
--- nova/tests/api/openstack/test_versions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index a9308e31d..8b60db71e 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -285,7 +285,7 @@ class VersionsTest(test.TestCase): versions = root.xpath('ns:version', namespaces=NS) self.assertEqual(len(versions), 2) - for (i, v) in ((0, 'v1.0'), (1, 'v1.1')): + for i, v in enumerate(['v1.0', 'v1.1']): version = versions[i] expected = VERSIONS[v] for key in ['id', 'status', 'updated']: -- cgit From 6b61a6be4a14444723e3728fb0fcdd77bac8fe74 Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Mon, 5 Sep 2011 14:45:41 +0900 Subject: Fix bug #835919 that output a option file for dnsmasq not to offer a default gateway on second vif. --- nova/db/sqlalchemy/api.py | 15 +++++++++++++ nova/network/linux_net.py | 55 +++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 68 insertions(+), 2 deletions(-) mode change 100644 => 100755 nova/db/sqlalchemy/api.py mode change 100644 => 100755 nova/network/linux_net.py diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py old mode 100644 new mode 100755 index b99667afc..aee1c2a55 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1415,6 +1415,21 @@ def instance_get_all_by_reservation(context, reservation_id): filter_by(project_id=context.project_id).\ filter_by(deleted=False).\ all() + +@require_admin_context +def instance_get_all_by_network(context, network_id): + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ + options(joinedload('instance_type')).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(network_id=network_id).\ + all() + @require_context diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py old mode 100644 new mode 100755 index 57c1d0c28..8d5a0bbbd --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -511,6 +511,35 @@ def get_dhcp_hosts(context, network_ref): return '\n'.join(hosts) +def get_dhcp_opts(context, network_ref): + """Get network's hosts config in dhcp-opts format.""" + # create a decision dictionary for default gateway for eache instance + default_gateway_network_node = dict() + network_id = network_ref['id'] + instance_refs = db.instance_get_all_by_network(context, network_id) + ips_ref = db.network_get_associated_fixed_ips(context, network_id) + + for instance_ref in instance_refs: + instance_id = instance_ref['id'] + # nic number is decided by ‡”Ô from this function in xxx function + vifs = db.virtual_interface_get_by_instance(context, instance_id) + if not vifs: + continue + # offer a default gateway to the first virtual interface of instance + first_vif = vifs[0] + default_gateway_network_node[instance_id] = first_vif['network_id'] + + for fixed_ip_ref in ips_ref: + instance_id = fixed_ip_ref['instance_id'] + target_network_id = default_gateway_network_node[instance_id] + if target_network_id == fixed_ip_ref['network_id']: + hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=True)) + else: + hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=None)) + + return '\n'.join(hosts) + + # NOTE(ja): Sending a HUP only reloads the hostfile, so any # configuration options (like 
dchp-range, vlan, ...) # aren't reloaded. @@ -526,8 +555,13 @@ def update_dhcp(context, dev, network_ref): with open(conffile, 'w') as f: f.write(get_dhcp_hosts(context, network_ref)) + optsfile = _dhcp_file(dev, 'opts') + with open(optsfile, 'w') as f: + f.write(get_dhcp_opts(context, network_ref)) + # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) + os.chmod(optsfile, 0644) pid = _dnsmasq_pid_for(dev) @@ -559,6 +593,7 @@ def update_dhcp(context, dev, network_ref): '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])), '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, + '--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts'), '--leasefile-ro'] if FLAGS.dns_server: cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server] @@ -625,13 +660,29 @@ def _host_lease(fixed_ip_ref): instance_ref['hostname'] or '*') +def _host_dhcp_network(fixed_ip_ref): + instance_ref = fixed_ip_ref['instance'] + return 'NW-i%08d-%s' % (instance_ref['id'], + fixed_ip_ref['network_id']) + + def _host_dhcp(fixed_ip_ref): """Return a host string for an address in dhcp-host format.""" instance_ref = fixed_ip_ref['instance'] - return '%s,%s.%s,%s' % (fixed_ip_ref['virtual_interface']['address'], + return '%s,%s.%s,%s,%s' % (fixed_ip_ref['virtual_interface']['address'], instance_ref['hostname'], FLAGS.dhcp_domain, - fixed_ip_ref['address']) + fixed_ip_ref['address'], + "net:" + _host_dhcp_network(fixed_ip_ref)) + + +def _host_dhcp_opts(fixed_ip_ref, gw): + """Return a host string for an address in dhcp-host format.""" + if not gw: + return '%s,%s' % (_host_dhcp_network(fixed_ip_ref), + 3) + else: + return '' def _execute(*cmd, **kwargs): -- cgit From 6930c62a02a39f64506a7b2d2ec5b04dbff5fe3a Mon Sep 17 00:00:00 2001 From: sateesh Date: Mon, 5 Sep 2011 12:51:07 +0530 Subject: Multi-NIC support for vmwareapi virt driver in nova. Does injection of Multi-NIC information to instances with Operating system flavors Ubuntu, Windows and RHEL. vmwareapi virt driver now relies on calls to network manager instead of nova db calls for network configuration information of instance. Ensure if port group is properly associated with vlan_interface specified in case of VLAN networking for instances. Re-oranized VMWareVlanBridgeDriver and added session parmeter to methods to use existing session. Also removed session creation code as session comes as argument. Added check for flat_inject flag before attempting an inject operation. Removed stale code from vmwareapi stubs. Also updated some comments to be more meaningful. Did pep8 and pylint checks. Tried to improve pylint score for newly added lines of code. 
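The commit above hands the per-NIC configuration to the guest through the VM's machine.id property. As the _set_machine_id and _parse_network_details changes below show, each interface becomes one ';'-separated record of MAC, IP, netmask, gateway, broadcast and DNS (the DNS field may itself be a comma-separated list), and every record is terminated by '#'. An illustrative two-NIC value, with invented addresses:

    machine_id = ('02:16:3e:00:00:01;192.168.0.100;255.255.255.0;'
                  '192.168.0.1;192.168.0.255;192.168.0.3#'
                  '02:16:3e:00:00:02;10.0.0.100;255.255.255.0;'
                  '10.0.0.1;10.0.0.255;10.0.0.3,10.0.0.4#')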
--- nova/tests/vmwareapi/stubs.py | 2 - nova/virt/vmwareapi/fake.py | 2 +- nova/virt/vmwareapi/vif.py | 27 +++++------ nova/virt/vmwareapi/vm_util.py | 26 +++++----- nova/virt/vmwareapi/vmops.py | 106 ++++++++++++++++++++++++----------------- tools/esx/guest_tool.py | 70 +++++++++++++++------------ 6 files changed, 124 insertions(+), 109 deletions(-) diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py index 0ed5e9b68..7de10e612 100644 --- a/nova/tests/vmwareapi/stubs.py +++ b/nova/tests/vmwareapi/stubs.py @@ -45,8 +45,6 @@ def set_stubs(stubs): stubs.Set(vmware_images, 'get_vmdk_size_and_properties', fake.fake_get_vmdk_size_and_properties) stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image) - stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object", - fake_get_vim_object) stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object", fake_get_vim_object) stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object", diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index 4c62d18bb..f47ffdf1c 100644 --- a/nova/virt/vmwareapi/fake.py +++ b/nova/virt/vmwareapi/fake.py @@ -409,7 +409,7 @@ def fake_plug_vifs(*args, **kwargs): def fake_get_network(*args, **kwargs): """Fake get network.""" - return [{'type': 'fake'}] + return {'type': 'fake'} def fake_fetch_image(image, instance, **kwargs): diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index fb6548b34..9906b89e1 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -17,42 +17,35 @@ """VIF drivers for VMWare.""" -from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import utils from nova.virt.vif import VIFDriver -from nova.virt.vmwareapi_conn import VMWareAPISession from nova.virt.vmwareapi import network_utils LOG = logging.getLogger("nova.virt.vmwareapi.vif") FLAGS = flags.FLAGS +FLAGS['vmwareapi_vlan_interface'].SetDefault('vmnic0') class VMWareVlanBridgeDriver(VIFDriver): """VIF Driver to setup bridge/VLAN networking using VMWare API.""" def plug(self, instance, network, mapping): + """Plug the VIF to specified instance using information passed. + Currently we are plugging the VIF(s) during instance creation itself. + We can use this method when we add support to add additional NIC to + an existing instance.""" + pass + + def ensure_vlan_bridge(self, session, network): """Create a vlan and bridge unless they already exist.""" vlan_num = network['vlan'] bridge = network['bridge'] - bridge_interface = network['bridge_interface'] + vlan_interface = FLAGS.vmwareapi_vlan_interface - # Open vmwareapi session - host_ip = FLAGS.vmwareapi_host_ip - host_username = FLAGS.vmwareapi_host_username - host_password = FLAGS.vmwareapi_host_password - if not host_ip or host_username is None or host_password is None: - raise Exception(_('Must specify vmwareapi_host_ip, ' - 'vmwareapi_host_username ' - 'and vmwareapi_host_password to use ' - 'connection_type=vmwareapi')) - session = VMWareAPISession(host_ip, host_username, host_password, - FLAGS.vmwareapi_api_retry_count) - vlan_interface = bridge_interface # Check if the vlan_interface physical network adapter exists on the # host. 
if not network_utils.check_if_vlan_interface_exists(session, @@ -92,4 +85,6 @@ class VMWareVlanBridgeDriver(VIFDriver): pgroup=pg_vlanid) def unplug(self, instance, network, mapping): + """Cleanup operations like deleting port group if no instance + is associated with it.""" pass diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 82b5f7214..dd1c81196 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -39,8 +39,7 @@ def split_datastore_path(datastore_path): def get_vm_create_spec(client_factory, instance, data_store_name, - network_name="vmnet0", - os_type="otherGuest", network_ref=None): + vif_infos, os_type="otherGuest"): """Builds the VM Create spec.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') config_spec.name = instance.name @@ -61,14 +60,12 @@ def get_vm_create_spec(client_factory, instance, data_store_name, config_spec.numCPUs = int(instance.vcpus) config_spec.memoryMB = int(instance.memory_mb) - mac_address = None - if instance['mac_addresses']: - mac_address = instance['mac_addresses'][0]['address'] + vif_spec_list = [] + for vif_info in vif_infos: + vif_spec = create_network_spec(client_factory, vif_info) + vif_spec_list.append(vif_spec) - nic_spec = create_network_spec(client_factory, - network_name, mac_address) - - device_config_spec = [nic_spec] + device_config_spec = vif_spec_list config_spec.deviceChange = device_config_spec return config_spec @@ -93,8 +90,7 @@ def create_controller_spec(client_factory, key): return virtual_device_config -def create_network_spec(client_factory, network_name, mac_address, - network_ref=None): +def create_network_spec(client_factory, vif_info): """ Builds a config spec for the addition of a new network adapter to the VM. @@ -109,6 +105,9 @@ def create_network_spec(client_factory, network_name, mac_address, # NOTE(asomya): Only works on ESXi if the portgroup binding is set to # ephemeral. Invalid configuration if set to static and the NIC does # not come up on boot if set to dynamic. 
+ network_ref = vif_info['network_ref'] + network_name = vif_info['network_name'] + mac_address = vif_info['mac_address'] backing = None if (network_ref and network_ref['type'] == "DistributedVirtualPortgroup"): @@ -295,11 +294,8 @@ def get_dummy_vm_create_spec(client_factory, name, data_store_name): return config_spec -def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask, - gateway, broadcast, dns): +def get_machine_id_change_spec(client_factory, machine_id_str): """Builds the machine id change config spec.""" - machine_id_str = "%s;%s;%s;%s;%s;%s" % (mac, ip_addr, netmask, - gateway, broadcast, dns) virtual_machine_config_spec = \ client_factory.create('ns0:VirtualMachineConfigSpec') diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 07a6ba6ab..3ac99f923 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -27,7 +27,6 @@ import urllib2 import uuid from nova import context as nova_context -from nova import db from nova import exception from nova import flags from nova import log as logging @@ -111,22 +110,6 @@ class VMWareVMOps(object): client_factory = self._session._get_vim().client.factory service_content = self._session._get_vim().get_service_content() - network = db.network_get_by_instance(nova_context.get_admin_context(), - instance['id']) - - net_name = network['bridge'] - - def _check_if_network_bridge_exists(): - network_ref = \ - network_utils.get_network_with_the_name(self._session, - net_name) - if network_ref is None: - raise exception.NetworkNotFoundForBridge(bridge=net_name) - return network_ref - - self.plug_vifs(instance, network_info) - network_obj = _check_if_network_bridge_exists() - def _get_datastore_ref(): """Get the datastore list and choose the first local storage.""" data_stores = self._session._call_method(vim_util, "get_objects", @@ -182,11 +165,36 @@ class VMWareVMOps(object): vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors() + def _check_if_network_bridge_exists(network_name): + network_ref = \ + network_utils.get_network_with_the_name(self._session, + network_name) + if network_ref is None: + raise exception.NetworkNotFoundForBridge(bridge=network_name) + return network_ref + + def _get_vif_infos(): + vif_infos = [] + for (network, mapping) in network_info: + mac_address = mapping['mac'] + network_name = network['bridge'] + if mapping.get('should_create_vlan'): + network_ref = self._vif_driver.ensure_vlan_bridge( + self._session, network) + else: + network_ref = _check_if_network_bridge_exists(network_name) + vif_infos.append({'network_name': network_name, + 'mac_address': mac_address, + 'network_ref': network_ref, + }) + return vif_infos + + vif_infos = _get_vif_infos() + # Get the create vm config spec config_spec = vm_util.get_vm_create_spec( client_factory, instance, - data_store_name, net_name, os_type, - network_obj) + data_store_name, vif_infos, os_type) def _execute_create_vm(): """Create VM on ESX host.""" @@ -204,8 +212,10 @@ class VMWareVMOps(object): _execute_create_vm() - # Set the machine id for the VM for setting the IP - self._set_machine_id(client_factory, instance) + # Set the machine.id parameter of the instance to inject + # the NIC configuration inside the VM + if FLAGS.flat_injected: + self._set_machine_id(client_factory, instance, network_info) # Naming the VM files in correspondence with the VM instance name # The flat vmdk file name @@ -716,39 +726,45 @@ class VMWareVMOps(object): """Return link to instance's ajax console.""" return 
'http://fakeajaxconsole/fake_url' - def _set_machine_id(self, client_factory, instance): + def _set_machine_id(self, client_factory, instance, network_info): """ - Set the machine id of the VM for guest tools to pick up and change - the IP. + Set the machine id of the VM for guest tools to pick up and reconfigure + the network interfaces. """ - admin_context = nova_context.get_admin_context() vm_ref = self._get_vm_ref_from_the_name(instance.name) if vm_ref is None: raise exception.InstanceNotFound(instance_id=instance.id) - network = db.network_get_by_instance(nova_context.get_admin_context(), - instance['id']) - mac_address = None - if instance['mac_addresses']: - mac_address = instance['mac_addresses'][0]['address'] - - net_mask = network["netmask"] - gateway = network["gateway"] - broadcast = network["broadcast"] - # TODO(vish): add support for dns2 - dns = network["dns1"] - - addresses = db.instance_get_fixed_addresses(admin_context, - instance['id']) - ip_addr = addresses[0] if addresses else None + + machine_id_str = '' + for (network, info) in network_info: + # TODO(vish): add support for dns2 + # TODO(sateesh): add support for injection of ipv6 configuration + ip_v4 = ip_v6 = None + if 'ips' in info and len(info['ips']) > 0: + ip_v4 = info['ips'][0] + if 'ip6s' in info and len(info['ip6s']) > 0: + ip_v6 = info['ip6s'][0] + if len(info['dns']) > 0: + dns = info['dns'][0] + else: + dns = '' + + interface_str = "%s;%s;%s;%s;%s;%s" % \ + (info['mac'], + ip_v4 and ip_v4['ip'] or '', + ip_v4 and ip_v4['netmask'] or '', + info['gateway'], + info['broadcast'], + dns) + machine_id_str = machine_id_str + interface_str + '#' machine_id_change_spec = \ - vm_util.get_machine_id_change_spec(client_factory, mac_address, - ip_addr, net_mask, gateway, - broadcast, dns) + vm_util.get_machine_id_change_spec(client_factory, machine_id_str) + LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id " "with ip - %(ip_addr)s") % ({'name': instance.name, - 'ip_addr': ip_addr})) + 'ip_addr': ip_v4['ip']})) reconfig_task = self._session._call_method(self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=machine_id_change_spec) @@ -756,7 +772,7 @@ class VMWareVMOps(object): LOG.debug(_("Reconfigured VM instance %(name)s to set the machine id " "with ip - %(ip_addr)s") % ({'name': instance.name, - 'ip_addr': ip_addr})) + 'ip_addr': ip_v4['ip']})) def _get_datacenter_name_and_ref(self): """Get the datacenter name and the reference.""" diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py index 97b5302ba..5158d883a 100644 --- a/tools/esx/guest_tool.py +++ b/tools/esx/guest_tool.py @@ -81,28 +81,34 @@ def _bytes2int(bytes): def _parse_network_details(machine_id): """ - Parse the machine.id field to get MAC, IP, Netmask and Gateway fields - machine.id is of the form MAC;IP;Netmask;Gateway;Broadcast;DNS1,DNS2 - where ';' is the separator. + Parse the machine_id to get MAC, IP, Netmask and Gateway fields per NIC. + machine_id is of the form ('NIC_record#NIC_record#', '') + Each of the NIC will have record NIC_record in the form + 'MAC;IP;Netmask;Gateway;Broadcast;DNS' where ';' is field separator. + Each record is separated by '#' from next record. 
""" + logging.debug(_("Received machine_id from vmtools : %s") % machine_id[0]) network_details = [] if machine_id[1].strip() == "1": pass else: - network_info_list = machine_id[0].split(';') - assert len(network_info_list) % 6 == 0 - no_grps = len(network_info_list) / 6 - i = 0 - while i < no_grps: - k = i * 6 - network_details.append(( - network_info_list[k].strip().lower(), - network_info_list[k + 1].strip(), - network_info_list[k + 2].strip(), - network_info_list[k + 3].strip(), - network_info_list[k + 4].strip(), - network_info_list[k + 5].strip().split(','))) - i += 1 + for machine_id_str in machine_id[0].split('#'): + network_info_list = machine_id_str.split(';') + if len(network_info_list) % 6 != 0: + break + no_grps = len(network_info_list) / 6 + i = 0 + while i < no_grps: + k = i * 6 + network_details.append(( + network_info_list[k].strip().lower(), + network_info_list[k + 1].strip(), + network_info_list[k + 2].strip(), + network_info_list[k + 3].strip(), + network_info_list[k + 4].strip(), + network_info_list[k + 5].strip().split(','))) + i += 1 + logging.debug(_("NIC information from vmtools : %s") % network_details) return network_details @@ -279,6 +285,7 @@ def _filter_duplicates(all_entries): def _set_rhel_networking(network_details=None): + """Set IPv4 network settings for RHEL distros.""" network_details = network_details or [] all_dns_servers = [] for network_detail in network_details: @@ -320,31 +327,33 @@ def _set_rhel_networking(network_details=None): def _set_ubuntu_networking(network_details=None): + """Set IPv4 network settings for Ubuntu.""" network_details = network_details or [] - """ Set IPv4 network settings for Ubuntu """ all_dns_servers = [] - for network_detail in network_details: + interface_file_name = '/etc/network/interfaces' + # Remove file + os.remove(interface_file_name) + # Touch file + _execute(['touch', interface_file_name]) + interface_file = open(interface_file_name, 'w') + for device, network_detail in enumerate(network_details): mac_address, ip_address, subnet_mask, gateway, broadcast,\ dns_servers = network_detail all_dns_servers.extend(dns_servers) adapter_name, current_ip_address = \ _get_linux_adapter_name_and_ip_address(mac_address) - if adapter_name and not ip_address == current_ip_address: - interface_file_name = \ - '/etc/network/interfaces' - # Remove file - os.remove(interface_file_name) - # Touch file - _execute(['touch', interface_file_name]) - interface_file = open(interface_file_name, 'w') + if adapter_name: interface_file.write('\nauto %s' % adapter_name) interface_file.write('\niface %s inet static' % adapter_name) interface_file.write('\nbroadcast %s' % broadcast) interface_file.write('\ngateway %s' % gateway) interface_file.write('\nnetmask %s' % subnet_mask) - interface_file.write('\naddress %s' % ip_address) - interface_file.close() + interface_file.write('\naddress %s\n' % ip_address) + logging.debug(_("Successfully configured NIC %d with " + "NIC info %s") % (device, network_detail)) + interface_file.close() + if all_dns_servers: dns_file_name = "/etc/resolv.conf" os.remove(dns_file_name) @@ -355,7 +364,8 @@ def _set_ubuntu_networking(network_details=None): for dns_server in unique_entries: dns_file.write("\nnameserver %s" % dns_server) dns_file.close() - print "\nRestarting networking....\n" + + logging.debug(_("Restarting networking....\n")) _execute(['/etc/init.d/networking', 'restart']) -- cgit From d6f1a31d56de84398246498d0f2676d9741cdccf Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Mon, 5 Sep 2011 20:23:28 
+0900 Subject: implement unit test for linux_net --- nova/db/api.py | 5 + nova/network/linux_net.py | 17 ++-- nova/tests/test_linux_net.py | 232 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 247 insertions(+), 7 deletions(-) mode change 100644 => 100755 nova/db/api.py create mode 100755 nova/tests/test_linux_net.py diff --git a/nova/db/api.py b/nova/db/api.py old mode 100644 new mode 100755 index 148887635..85e9c7669 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -532,6 +532,11 @@ def instance_get_all_by_reservation(context, reservation_id): return IMPL.instance_get_all_by_reservation(context, reservation_id) +def instance_get_all_by_network(context, network_id): + """Get all instances belonging to a network.""" + return IMPL.instance_get_all_by_network(context, network_id) + + def instance_get_by_fixed_ip(context, address): """Get an instance for a fixed ip by address.""" return IMPL.instance_get_by_fixed_ip(context, address) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 8d5a0bbbd..50d9ccf19 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -514,6 +514,7 @@ def get_dhcp_hosts(context, network_ref): def get_dhcp_opts(context, network_ref): """Get network's hosts config in dhcp-opts format.""" # create a decision dictionary for default gateway for eache instance + hosts = [] default_gateway_network_node = dict() network_id = network_ref['id'] instance_refs = db.instance_get_all_by_network(context, network_id) @@ -521,9 +522,10 @@ def get_dhcp_opts(context, network_ref): for instance_ref in instance_refs: instance_id = instance_ref['id'] - # nic number is decided by ‡”Ô from this function in xxx function + # nic number is decided by xxx from this function in xxx function vifs = db.virtual_interface_get_by_instance(context, instance_id) - if not vifs: + if vifs == None: + default_gateway_network_node[instance_id] = None continue # offer a default gateway to the first virtual interface of instance first_vif = vifs[0] @@ -531,11 +533,12 @@ def get_dhcp_opts(context, network_ref): for fixed_ip_ref in ips_ref: instance_id = fixed_ip_ref['instance_id'] - target_network_id = default_gateway_network_node[instance_id] - if target_network_id == fixed_ip_ref['network_id']: - hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=True)) - else: - hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=None)) + if default_gateway_network_node.has_key(instance_id): + target_network_id = default_gateway_network_node[instance_id] + if target_network_id == fixed_ip_ref['network_id']: + hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=True)) + else: + hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=None)) return '\n'.join(hosts) diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py new file mode 100755 index 000000000..3c3cdd0d9 --- /dev/null +++ b/nova/tests/test_linux_net.py @@ -0,0 +1,232 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 NTT +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import context +from nova import db +from nova import exception +from nova import log as logging +from nova import test +from nova import utils +from nova import flags +from nova.network import manager as network_manager + +import mox + +FLAGS = flags.FLAGS + +LOG = logging.getLogger('nova.tests.network') + + +HOST = "testhost" + +instances = [{'id': 0, + 'host': 'fake_instance00', + 'hostname': 'fake_instance00'}, + {'id': 1, + 'host': 'fake_instance01', + 'hostname': 'fake_instance01'}] + + +addresses = [{"address" : "10.0.0.1" }, + {"address" : "10.0.0.2" }, + {"address" : "10.0.0.3" }, + {"address" : "10.0.0.4" }] + + +networks = [{'id': 0, + 'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + 'label': 'test0', + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.0.0/24', + 'cidr_v6': '2001:db8::/64', + 'gateway_v6': '2001:db8::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa0', + 'bridge_interface': 'fake_fa0', + 'gateway': '192.168.0.1', + 'broadcast': '192.168.0.255', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', + 'dhcp_server' : '0.0.0.0', + 'dhcp_start' : '192.168.100.1', + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.0.2'}, + {'id': 1, + 'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", + 'label': 'test1', + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.1.0/24', + 'cidr_v6': '2001:db9::/64', + 'gateway_v6': '2001:db9::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa1', + 'bridge_interface': 'fake_fa1', + 'gateway': '192.168.1.1', + 'broadcast': '192.168.1.255', + 'dns1': '192.168.0.1', + 'dns2': '192.168.0.2', + 'dhcp_server' : '0.0.0.0', + 'dhcp_start' : '192.168.100.1', + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.1.2'}] + + +fixed_ips = [{'id': 0, + 'network_id': 0, + 'address': '192.168.0.100', + 'instance_id': 0, + 'allocated': True, + 'virtual_interface_id': 0, + 'virtual_interface' : addresses[0], + 'instance': instances[0], + 'floating_ips': []}, + {'id': 1, + 'network_id': 1, + 'address': '192.168.1.100', + 'instance_id': 0, + 'allocated': True, + 'virtual_interface_id': 1, + 'virtual_interface' : addresses[1], + 'instance': instances[0], + 'floating_ips': []}, + {'id': 2, + 'network_id': 0, + 'address': '192.168.0.101', + 'instance_id': 1, + 'allocated': True, + 'virtual_interface_id': 2, + 'virtual_interface' : addresses[2], + 'instance': instances[1], + 'floating_ips': []}, + {'id': 3, + 'network_id': 1, + 'address': '192.168.1.101', + 'instance_id': 1, + 'allocated': True, + 'virtual_interface_id': 3, + 'virtual_interface' : addresses[3], + 'instance': instances[1], + 'floating_ips': []}] + + + + +vifs = [{'id': 0, + 'address': 'DE:AD:BE:EF:00:00', + 'uuid': '00000000-0000-0000-0000-0000000000000000', + 'network_id': 0, + 'network': networks[0], + 'instance_id': 0}, + {'id': 1, + 'address': 'DE:AD:BE:EF:00:01', + 'uuid': '00000000-0000-0000-0000-0000000000000001', + 'network_id': 1, + 'network': networks[1], + 'instance_id': 0}, + {'id': 2, + 'address': 'DE:AD:BE:EF:00:02', + 'uuid': '00000000-0000-0000-0000-0000000000000002', + 'network_id': 1, + 'network': networks[1], + 'instance_id': 1}, + {'id': 3, + 'address': 'DE:AD:BE:EF:00:03', + 'uuid': '00000000-0000-0000-0000-0000000000000003', + 'network_id': 0, + 'network': networks[0], + 'instance_id': 1}] + + +class LinuxNetworkTestCase(test.TestCase): + + def setUp(self): + super(LinuxNetworkTestCase, self).setUp() + network_driver = 
FLAGS.network_driver + self.driver = utils.import_object(network_driver) + self.driver.db = db + + def test_update_dhcp(self): + self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') + self.mox.StubOutWithMock(db, 'instance_get_all_by_network') + self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') + + fixed_ips[1]['instance'] = instances[0] + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(fixed_ips) + + db.instance_get_all_by_network(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(instances) + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(fixed_ips) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn([vifs[0],vifs[1]]) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn([vifs[2],vifs[3]]) + + self.mox.ReplayAll() + self.driver.update_dhcp(None, "eth0", networks[0]) + + + def test_get_dhcp_hosts(self): + self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') + + fixed_ips[1]['instance'] = instances[0] + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(fixed_ips) + + self.mox.ReplayAll() + + hosts = self.driver.get_dhcp_hosts(None, networks[0]) + + self.assertEquals(hosts, + "10.0.0.1,fake_instance00.novalocal,192.168.0.100,net:NW-i00000000-0\n" \ + "10.0.0.2,fake_instance00.novalocal,192.168.1.100,net:NW-i00000000-1\n" \ + "10.0.0.3,fake_instance01.novalocal,192.168.0.101,net:NW-i00000001-0\n" \ + "10.0.0.4,fake_instance01.novalocal,192.168.1.101,net:NW-i00000001-1") + + + def test_get_dhcp_opts(self): + self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') + self.mox.StubOutWithMock(db, 'instance_get_all_by_network') + self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') + + fixed_ips[1]['instance'] = instances[0] + + db.instance_get_all_by_network(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(instances) + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(fixed_ips) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn([vifs[0],vifs[1]]) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn([vifs[2],vifs[3]]) + + self.mox.ReplayAll() + + opts = self.driver.get_dhcp_opts(None, networks[0]) + self.assertEquals(opts, '\nNW-i00000000-1,3\nNW-i00000001-0,3\n') + + + -- cgit From 418923c385a34265dd33a0c2ada3aa97a5749a06 Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Mon, 5 Sep 2011 21:13:00 +0900 Subject: format for pep8 --- nova/db/api.py | 4 +- nova/db/sqlalchemy/api.py | 5 +- nova/network/linux_net.py | 6 +- nova/tests/test_linux_net.py | 177 ++++++++++++++++++++++++++++++++----------- 4 files changed, 141 insertions(+), 51 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 85e9c7669..faac13966 100755 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -535,8 +535,8 @@ def instance_get_all_by_reservation(context, reservation_id): def instance_get_all_by_network(context, network_id): """Get all instances belonging to a network.""" return IMPL.instance_get_all_by_network(context, network_id) - - + + def instance_get_by_fixed_ip(context, address): """Get an instance for a fixed ip by address.""" return IMPL.instance_get_by_fixed_ip(context, address) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index aee1c2a55..3194a5231 100755 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1415,8 +1415,8 @@ def 
instance_get_all_by_reservation(context, reservation_id): filter_by(project_id=context.project_id).\ filter_by(deleted=False).\ all() - -@require_admin_context + + def instance_get_all_by_network(context, network_id): session = get_session() return session.query(models.Instance).\ @@ -1431,7 +1431,6 @@ def instance_get_all_by_network(context, network_id): all() - @require_context def instance_get_by_fixed_ip(context, address): """Return instance ref by exact match of FixedIP""" diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 50d9ccf19..4ebc3df23 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -438,7 +438,7 @@ def ensure_vpn_forward(public_ip, port, private_ip): def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule.""" - for chain, rule in floating_forward_rules(floating_ip, fixed_ip): + for chain, rule in floating_forward_rules(floaing_ip, fixed_ip): iptables_manager.ipv4['nat'].add_rule(chain, rule) iptables_manager.apply() @@ -518,7 +518,7 @@ def get_dhcp_opts(context, network_ref): default_gateway_network_node = dict() network_id = network_ref['id'] instance_refs = db.instance_get_all_by_network(context, network_id) - ips_ref = db.network_get_associated_fixed_ips(context, network_id) + ips_ref = db.network_get_associated_fixed_ips(context, network_id) for instance_ref in instance_refs: instance_id = instance_ref['id'] @@ -533,7 +533,7 @@ def get_dhcp_opts(context, network_ref): for fixed_ip_ref in ips_ref: instance_id = fixed_ip_ref['instance_id'] - if default_gateway_network_node.has_key(instance_id): + if instance_id in default_gateway_network_node: target_network_id = default_gateway_network_node[instance_id] if target_network_id == fixed_ip_ref['network_id']: hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=True)) diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py index 3c3cdd0d9..94c53817b 100755 --- a/nova/tests/test_linux_net.py +++ b/nova/tests/test_linux_net.py @@ -41,10 +41,10 @@ instances = [{'id': 0, 'hostname': 'fake_instance01'}] -addresses = [{"address" : "10.0.0.1" }, - {"address" : "10.0.0.2" }, - {"address" : "10.0.0.3" }, - {"address" : "10.0.0.4" }] +addresses = [{"address": "10.0.0.1"}, + {"address": "10.0.0.2"}, + {"address": "10.0.0.3"}, + {"address": "10.0.0.4"}] networks = [{'id': 0, @@ -63,8 +63,8 @@ networks = [{'id': 0, 'broadcast': '192.168.0.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', - 'dhcp_server' : '0.0.0.0', - 'dhcp_start' : '192.168.100.1', + 'dhcp_server': '0.0.0.0', + 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', @@ -85,8 +85,8 @@ networks = [{'id': 0, 'broadcast': '192.168.1.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', - 'dhcp_server' : '0.0.0.0', - 'dhcp_start' : '192.168.100.1', + 'dhcp_server': '0.0.0.0', + 'dhcp_start': '192.168.100.1', 'vlan': None, 'host': None, 'project_id': 'fake_project', @@ -99,7 +99,7 @@ fixed_ips = [{'id': 0, 'instance_id': 0, 'allocated': True, 'virtual_interface_id': 0, - 'virtual_interface' : addresses[0], + 'virtual_interface': addresses[0], 'instance': instances[0], 'floating_ips': []}, {'id': 1, @@ -108,7 +108,7 @@ fixed_ips = [{'id': 0, 'instance_id': 0, 'allocated': True, 'virtual_interface_id': 1, - 'virtual_interface' : addresses[1], + 'virtual_interface': addresses[1], 'instance': instances[0], 'floating_ips': []}, {'id': 2, @@ -117,7 +117,7 @@ fixed_ips = [{'id': 0, 'instance_id': 1, 'allocated': True, 'virtual_interface_id': 2, - 
'virtual_interface' : addresses[2], + 'virtual_interface': addresses[2], 'instance': instances[1], 'floating_ips': []}, {'id': 3, @@ -126,13 +126,11 @@ fixed_ips = [{'id': 0, 'instance_id': 1, 'allocated': True, 'virtual_interface_id': 3, - 'virtual_interface' : addresses[3], + 'virtual_interface': addresses[3], 'instance': instances[1], 'floating_ips': []}] - - vifs = [{'id': 0, 'address': 'DE:AD:BE:EF:00:00', 'uuid': '00000000-0000-0000-0000-0000000000000000', @@ -167,66 +165,159 @@ class LinuxNetworkTestCase(test.TestCase): self.driver = utils.import_object(network_driver) self.driver.db = db - def test_update_dhcp(self): + def test_update_dhcp_for_nw00(self): self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') self.mox.StubOutWithMock(db, 'instance_get_all_by_network') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') - fixed_ips[1]['instance'] = instances[0] db.network_get_associated_fixed_ips(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(fixed_ips) + mox.IgnoreArg())\ + .AndReturn([fixed_ips[0], + fixed_ips[3]]) db.instance_get_all_by_network(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(instances) + mox.IgnoreArg())\ + .AndReturn(instances) db.network_get_associated_fixed_ips(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(fixed_ips) + mox.IgnoreArg())\ + .AndReturn([fixed_ips[0], + fixed_ips[3]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn([vifs[0],vifs[1]]) + mox.IgnoreArg())\ + .AndReturn([vifs[0], vifs[1]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn([vifs[2],vifs[3]]) - + mox.IgnoreArg())\ + .AndReturn([vifs[2], vifs[3]]) self.mox.ReplayAll() + self.driver.update_dhcp(None, "eth0", networks[0]) + def test_update_dhcp_for_nw01(self): + self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') + self.mox.StubOutWithMock(db, 'instance_get_all_by_network') + self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') + + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([fixed_ips[1], + fixed_ips[2]]) - def test_get_dhcp_hosts(self): + db.instance_get_all_by_network(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn(instances) + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([fixed_ips[1], + fixed_ips[2]]) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([vifs[0], vifs[1]]) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([vifs[2], vifs[3]]) + self.mox.ReplayAll() + + self.driver.update_dhcp(None, "eth0", networks[0]) + + def test_get_dhcp_hosts_for_nw00(self): self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') - fixed_ips[1]['instance'] = instances[0] db.network_get_associated_fixed_ips(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(fixed_ips) + mox.IgnoreArg())\ + .AndReturn([fixed_ips[0], + fixed_ips[3]]) + self.mox.ReplayAll() + + expected = \ + "10.0.0.1,fake_instance00.novalocal,"\ + "192.168.0.100,net:NW-i00000000-0\n"\ + "10.0.0.4,fake_instance01.novalocal,"\ + "192.168.1.101,net:NW-i00000001-1" + actual_hosts = self.driver.get_dhcp_hosts(None, networks[1]) + self.assertEquals(actual_hosts, expected) + + def test_get_dhcp_hosts_for_nw01(self): + self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') + + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([fixed_ips[1], + fixed_ips[2]]) self.mox.ReplayAll() - hosts = self.driver.get_dhcp_hosts(None, 
networks[0]) - - self.assertEquals(hosts, - "10.0.0.1,fake_instance00.novalocal,192.168.0.100,net:NW-i00000000-0\n" \ - "10.0.0.2,fake_instance00.novalocal,192.168.1.100,net:NW-i00000000-1\n" \ - "10.0.0.3,fake_instance01.novalocal,192.168.0.101,net:NW-i00000001-0\n" \ - "10.0.0.4,fake_instance01.novalocal,192.168.1.101,net:NW-i00000001-1") + expected = \ + "10.0.0.2,fake_instance00.novalocal,"\ + "192.168.1.100,net:NW-i00000000-1\n"\ + "10.0.0.3,fake_instance01.novalocal,"\ + "192.168.0.101,net:NW-i00000001-0" + actual_hosts = self.driver.get_dhcp_hosts(None, networks[0]) + + self.assertEquals(actual_hosts, expected) - - def test_get_dhcp_opts(self): + def test_get_dhcp_opts_for_nw00(self): self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') self.mox.StubOutWithMock(db, 'instance_get_all_by_network') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') - fixed_ips[1]['instance'] = instances[0] - db.instance_get_all_by_network(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(instances) + mox.IgnoreArg())\ + .AndReturn(instances) db.network_get_associated_fixed_ips(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(fixed_ips) + mox.IgnoreArg())\ + .AndReturn([fixed_ips[0], + fixed_ips[3]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn([vifs[0],vifs[1]]) + mox.IgnoreArg())\ + .AndReturn([vifs[0], + vifs[1]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn([vifs[2],vifs[3]]) + mox.IgnoreArg())\ + .AndReturn([vifs[2], + vifs[3]]) + self.mox.ReplayAll() + + expected_opts = '\n'\ + '' + actual_opts = self.driver.get_dhcp_opts(None, networks[0]) + self.assertEquals(actual_opts, expected_opts) + + def test_get_dhcp_opts_for_nw01(self): + self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') + self.mox.StubOutWithMock(db, 'instance_get_all_by_network') + self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') + + db.instance_get_all_by_network(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn(instances) + db.network_get_associated_fixed_ips(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([fixed_ips[1], + fixed_ips[2]]) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([vifs[0], + vifs[1]]) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndReturn([vifs[2], + vifs[3]]) self.mox.ReplayAll() - opts = self.driver.get_dhcp_opts(None, networks[0]) - self.assertEquals(opts, '\nNW-i00000000-1,3\nNW-i00000001-0,3\n') + expected_opts = 'NW-i00000000-1,3\n'\ + 'NW-i00000001-0,3' + actual_opts = self.driver.get_dhcp_opts(None, networks[1]) + + self.assertEquals(actual_opts, expected_opts) + + def test_dhcp_opts_default_gateway_network(self): + expected = "" + actual = self.driver._host_dhcp_opts(fixed_ips[0], True) + self.assertEquals(actual, expected) - + def test_dhcp_opts_not_default_gateway_network(self): + expected = "NW-i00000000-0,3" + actual = self.driver._host_dhcp_opts(fixed_ips[0], False) + self.assertEquals(actual, expected) -- cgit From 14314701bac27ce0e913f8e0587c354819a4bd9e Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Mon, 5 Sep 2011 21:32:52 +0900 Subject: format for pep8 --- nova/tests/test_linux_net.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py index 94c53817b..93d9b02c9 100755 --- a/nova/tests/test_linux_net.py +++ b/nova/tests/test_linux_net.py @@ -316,7 +316,6 @@ class LinuxNetworkTestCase(test.TestCase): actual = 
self.driver._host_dhcp_opts(fixed_ips[0], True) self.assertEquals(actual, expected) - def test_dhcp_opts_not_default_gateway_network(self): expected = "NW-i00000000-0,3" actual = self.driver._host_dhcp_opts(fixed_ips[0], False) -- cgit From b9ef3dcaa241088e205e170ca0a236afb0ea34dd Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Tue, 6 Sep 2011 10:10:25 +0900 Subject: added me to Authors --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index b9e7a7d23..f775c6f0c 100644 --- a/Authors +++ b/Authors @@ -60,6 +60,7 @@ Joshua McKenty Justin Santa Barbara Justin Shepherd Kei Masumoto +Keisuke Tagami masumoto Ken Pepple Kevin Bringard -- cgit From b922e08a6c15eeaab1f7ec342c00673b610d0e76 Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Tue, 6 Sep 2011 10:14:27 +0900 Subject: correct a method to collect instances from db add interface data to test --- nova/db/api.py | 5 --- nova/db/sqlalchemy/api.py | 14 --------- nova/network/linux_net.py | 8 +++-- nova/tests/test_linux_net.py | 74 +++++++++++++++++++++++++++++--------------- 4 files changed, 54 insertions(+), 47 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index faac13966..148887635 100755 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -532,11 +532,6 @@ def instance_get_all_by_reservation(context, reservation_id): return IMPL.instance_get_all_by_reservation(context, reservation_id) -def instance_get_all_by_network(context, network_id): - """Get all instances belonging to a network.""" - return IMPL.instance_get_all_by_network(context, network_id) - - def instance_get_by_fixed_ip(context, address): """Get an instance for a fixed ip by address.""" return IMPL.instance_get_by_fixed_ip(context, address) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 3194a5231..b99667afc 100755 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1417,20 +1417,6 @@ def instance_get_all_by_reservation(context, reservation_id): all() -def instance_get_all_by_network(context, network_id): - session = get_session() - return session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ - filter_by(deleted=can_read_deleted(context)).\ - filter_by(network_id=network_id).\ - all() - - @require_context def instance_get_by_fixed_ip(context, address): """Return instance ref by exact match of FixedIP""" diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4ebc3df23..c26e3574e 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -517,11 +517,13 @@ def get_dhcp_opts(context, network_ref): hosts = [] default_gateway_network_node = dict() network_id = network_ref['id'] - instance_refs = db.instance_get_all_by_network(context, network_id) + instance_set = set() ips_ref = db.network_get_associated_fixed_ips(context, network_id) - for instance_ref in instance_refs: - instance_id = instance_ref['id'] + for fixed_ip_ref in ips_ref: + instance_set.add(fixed_ip_ref['instance_id']) + + for instance_id in instance_set: # nic number is decided by xxx from this function in xxx function vifs = db.virtual_interface_get_by_instance(context, instance_id) if vifs == None: diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py index 93d9b02c9..3f44fbd05 100755 --- a/nova/tests/test_linux_net.py +++ 
b/nova/tests/test_linux_net.py @@ -18,10 +18,10 @@ from nova import context from nova import db from nova import exception +from nova import flags from nova import log as logging from nova import test from nova import utils -from nova import flags from nova.network import manager as network_manager import mox @@ -44,7 +44,9 @@ instances = [{'id': 0, addresses = [{"address": "10.0.0.1"}, {"address": "10.0.0.2"}, {"address": "10.0.0.3"}, - {"address": "10.0.0.4"}] + {"address": "10.0.0.4"}, + {"address": "10.0.0.5"}, + {"address": "10.0.0.6"}] networks = [{'id': 0, @@ -128,6 +130,24 @@ fixed_ips = [{'id': 0, 'virtual_interface_id': 3, 'virtual_interface': addresses[3], 'instance': instances[1], + 'floating_ips': []}, + {'id': 4, + 'network_id': 0, + 'address': '192.168.0.102', + 'instance_id': 0, + 'allocated': True, + 'virtual_interface_id': 4, + 'virtual_interface': addresses[4], + 'instance': instances[0], + 'floating_ips': []}, + {'id': 5, + 'network_id': 1, + 'address': '192.168.1.102', + 'instance_id': 1, + 'allocated': True, + 'virtual_interface_id': 5, + 'virtual_interface': addresses[5], + 'instance': instances[1], 'floating_ips': []}] @@ -154,6 +174,18 @@ vifs = [{'id': 0, 'uuid': '00000000-0000-0000-0000-0000000000000003', 'network_id': 0, 'network': networks[0], + 'instance_id': 1}, + {'id': 4, + 'address': 'DE:AD:BE:EF:00:04', + 'uuid': '00000000-0000-0000-0000-0000000000000004', + 'network_id': 0, + 'network': networks[0], + 'instance_id': 0}, + {'id': 5, + 'address': 'DE:AD:BE:EF:00:05', + 'uuid': '00000000-0000-0000-0000-0000000000000005', + 'network_id': 1, + 'network': networks[1], 'instance_id': 1}] @@ -167,7 +199,6 @@ class LinuxNetworkTestCase(test.TestCase): def test_update_dhcp_for_nw00(self): self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') - self.mox.StubOutWithMock(db, 'instance_get_all_by_network') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') db.network_get_associated_fixed_ips(mox.IgnoreArg(), @@ -175,9 +206,6 @@ class LinuxNetworkTestCase(test.TestCase): .AndReturn([fixed_ips[0], fixed_ips[3]]) - db.instance_get_all_by_network(mox.IgnoreArg(), - mox.IgnoreArg())\ - .AndReturn(instances) db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\ .AndReturn([fixed_ips[0], @@ -194,7 +222,6 @@ class LinuxNetworkTestCase(test.TestCase): def test_update_dhcp_for_nw01(self): self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') - self.mox.StubOutWithMock(db, 'instance_get_all_by_network') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') db.network_get_associated_fixed_ips(mox.IgnoreArg(), @@ -202,9 +229,6 @@ class LinuxNetworkTestCase(test.TestCase): .AndReturn([fixed_ips[1], fixed_ips[2]]) - db.instance_get_all_by_network(mox.IgnoreArg(), - mox.IgnoreArg())\ - .AndReturn(instances) db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\ .AndReturn([fixed_ips[1], @@ -257,27 +281,27 @@ class LinuxNetworkTestCase(test.TestCase): def test_get_dhcp_opts_for_nw00(self): self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') - self.mox.StubOutWithMock(db, 'instance_get_all_by_network') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') - db.instance_get_all_by_network(mox.IgnoreArg(), - mox.IgnoreArg())\ - .AndReturn(instances) db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\ .AndReturn([fixed_ips[0], - fixed_ips[3]]) + fixed_ips[3], + fixed_ips[4]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\ 
.AndReturn([vifs[0], - vifs[1]]) + vifs[1], + vifs[4]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\ .AndReturn([vifs[2], - vifs[3]]) + vifs[3], + vifs[5]]) self.mox.ReplayAll() expected_opts = '\n'\ + '\n'\ '' actual_opts = self.driver.get_dhcp_opts(None, networks[0]) @@ -285,28 +309,28 @@ class LinuxNetworkTestCase(test.TestCase): def test_get_dhcp_opts_for_nw01(self): self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') - self.mox.StubOutWithMock(db, 'instance_get_all_by_network') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') - db.instance_get_all_by_network(mox.IgnoreArg(), - mox.IgnoreArg())\ - .AndReturn(instances) db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\ .AndReturn([fixed_ips[1], - fixed_ips[2]]) + fixed_ips[2], + fixed_ips[5]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\ .AndReturn([vifs[0], - vifs[1]]) + vifs[1], + vifs[4]]) db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\ .AndReturn([vifs[2], - vifs[3]]) + vifs[3], + vifs[5]]) self.mox.ReplayAll() expected_opts = 'NW-i00000000-1,3\n'\ - 'NW-i00000001-0,3' + 'NW-i00000001-0,3\n'\ + 'NW-i00000002-1,3' actual_opts = self.driver.get_dhcp_opts(None, networks[1]) self.assertEquals(actual_opts, expected_opts) -- cgit From 00c3cbe8621918fdd60f9068df2a41db52a7f76f Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Tue, 6 Sep 2011 10:20:28 +0900 Subject: revert codes for db --- nova/db/api.py | 0 nova/db/sqlalchemy/api.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 nova/db/api.py mode change 100755 => 100644 nova/db/sqlalchemy/api.py diff --git a/nova/db/api.py b/nova/db/api.py old mode 100755 new mode 100644 diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py old mode 100755 new mode 100644 -- cgit From d4668c7351fab83930a6fe5ee8f67dabe8027b48 Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Tue, 6 Sep 2011 13:57:14 +0900 Subject: fix a mistaking of deletion in ensure_floating_forward --- nova/network/linux_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index c26e3574e..48438760a 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -438,7 +438,7 @@ def ensure_vpn_forward(public_ip, port, private_ip): def ensure_floating_forward(floating_ip, fixed_ip): """Ensure floating ip forwarding rule.""" - for chain, rule in floating_forward_rules(floaing_ip, fixed_ip): + for chain, rule in floating_forward_rules(floating_ip, fixed_ip): iptables_manager.ipv4['nat'].add_rule(chain, rule) iptables_manager.apply() -- cgit From 6c55199d3ec900cea771e4f8077c87efa968c2f0 Mon Sep 17 00:00:00 2001 From: Keisuke Tagami Date: Tue, 6 Sep 2011 17:20:10 +0900 Subject: fix a mistaking of dataset and expected values on small test. 
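A side note on the expected strings in the dhcp tests above, since they only make sense against dnsmasq's file formats: each generated hosts line ends in a net:<tag> field that tags the lease for that NIC, and each opts line has the form <tag>,<option-number>[,<value>]. DHCP option 3 is the router option, and listing an option number with no value is dnsmasq's way of withholding that option, so an entry such as NW-i00000000-1,3 means "send no default route to instance 0's second NIC". The sketch below only illustrates that composition; ROUTER_OPTION, interface_tag and host_dhcp_opts are illustrative names chosen to match the test fixtures (including the assumed 8-digit zero-padded instance id in the tag), not the helpers actually defined in linux_net.py.

    # Minimal sketch of how opts entries like 'NW-i00000000-1,3' can be built.
    # All names here are assumptions for illustration, not nova's own helpers.

    ROUTER_OPTION = 3  # DHCP option 3 carries the default gateway


    def interface_tag(instance_id, nic_index):
        # Assumed tag format matching the fixtures: 'NW-i<8-digit id>-<nic index>'
        return 'NW-i%08d-%d' % (instance_id, nic_index)


    def host_dhcp_opts(instance_id, nic_index, offer_gateway):
        # dnsmasq withholds the router option when it is listed with no value,
        # so only NICs that must NOT get a default route produce a line.
        if offer_gateway:
            return ''
        return '%s,%d' % (interface_tag(instance_id, nic_index), ROUTER_OPTION)


    if __name__ == '__main__':
        print host_dhcp_opts(0, 0, offer_gateway=True)    # -> ''
        print host_dhcp_opts(0, 1, offer_gateway=False)   # -> 'NW-i00000000-1,3'

Read against that, the corrected expectations in the commit below line up with the fixture data: the gateway-bearing first NIC of each instance contributes an empty entry, and only the secondary NICs contribute a 'tag,3' line.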
--- nova/tests/test_linux_net.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py index 3f44fbd05..be6faa07b 100755 --- a/nova/tests/test_linux_net.py +++ b/nova/tests/test_linux_net.py @@ -114,7 +114,7 @@ fixed_ips = [{'id': 0, 'instance': instances[0], 'floating_ips': []}, {'id': 2, - 'network_id': 0, + 'network_id': 1, 'address': '192.168.0.101', 'instance_id': 1, 'allocated': True, @@ -123,7 +123,7 @@ fixed_ips = [{'id': 0, 'instance': instances[1], 'floating_ips': []}, {'id': 3, - 'network_id': 1, + 'network_id': 0, 'address': '192.168.1.101', 'instance_id': 1, 'allocated': True, @@ -256,7 +256,7 @@ class LinuxNetworkTestCase(test.TestCase): "10.0.0.1,fake_instance00.novalocal,"\ "192.168.0.100,net:NW-i00000000-0\n"\ "10.0.0.4,fake_instance01.novalocal,"\ - "192.168.1.101,net:NW-i00000001-1" + "192.168.1.101,net:NW-i00000001-0" actual_hosts = self.driver.get_dhcp_hosts(None, networks[1]) self.assertEquals(actual_hosts, expected) @@ -274,7 +274,7 @@ class LinuxNetworkTestCase(test.TestCase): "10.0.0.2,fake_instance00.novalocal,"\ "192.168.1.100,net:NW-i00000000-1\n"\ "10.0.0.3,fake_instance01.novalocal,"\ - "192.168.0.101,net:NW-i00000001-0" + "192.168.0.101,net:NW-i00000001-1" actual_hosts = self.driver.get_dhcp_hosts(None, networks[0]) self.assertEquals(actual_hosts, expected) @@ -301,7 +301,7 @@ class LinuxNetworkTestCase(test.TestCase): self.mox.ReplayAll() expected_opts = '\n'\ - '\n'\ + 'NW-i00000001-0,3\n'\ '' actual_opts = self.driver.get_dhcp_opts(None, networks[0]) @@ -329,8 +329,8 @@ class LinuxNetworkTestCase(test.TestCase): self.mox.ReplayAll() expected_opts = 'NW-i00000000-1,3\n'\ - 'NW-i00000001-0,3\n'\ - 'NW-i00000002-1,3' + '\n'\ + '' actual_opts = self.driver.get_dhcp_opts(None, networks[1]) self.assertEquals(actual_opts, expected_opts) -- cgit From c87009f009cb749dbccd6bd155ec1a6800631ae1 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 6 Sep 2011 09:24:24 -0400 Subject: cleaning up tests --- nova/image/glance.py | 2 +- nova/tests/image/test_glance.py | 549 ++++++++++++++++++---------------------- 2 files changed, 250 insertions(+), 301 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index f0d39dfb6..8920a5d8a 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -280,7 +280,7 @@ class GlanceImageService(object): if hasattr(context, 'auth_token') and context.auth_token: return True - properties = image_meta['properties'] + properties = image_meta.get('properties', {}) if context.project_id and ('project_id' in properties): return str(properties['project_id']) == str(context.project_id) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index e4324d24b..f77117ca0 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -18,20 +18,18 @@ import datetime import stubout -import unittest from nova.tests.api.openstack import fakes from nova import context from nova import exception from nova.image import glance from nova import test -from nova import utils class StubGlanceClient(object): - def __init__(self, images, add_response=None, update_response=None): - self.images = images + def __init__(self, images=None, add_response=None, update_response=None): + self.images = images or [] self.add_response = add_response self.update_response = update_response @@ -39,28 +37,51 @@ class StubGlanceClient(object): pass def get_image_meta(self, image_id): - return self.images[image_id] + for image in 
self.images: + if image['id'] == image_id: + return image + raise exception.ImageNotFound(image_id=image_id) - def get_images_detailed(self, filters=None, marker=None, limit=None): - images = self.images.values() + #TODO(bcwaldon): implement filters + def get_images_detailed(self, filters=None, marker=None, limit=3): if marker is None: index = 0 else: - for index, image in enumerate(images): + for index, image in enumerate(self.images): if image['id'] == marker: index += 1 break - # default to a page size of 3 to ensure we flex the pagination code - return images[index:index + 3] + + return self.images[index:index + limit] def get_image(self, image_id): - return self.images[image_id], [] + return self.get_image_meta(image_id), [] def add_image(self, metadata, data): - return self.add_response + self.images.append(metadata) + + try: + image_id = int(metadata['id']) + except KeyError: + # auto-generate an id if one wasn't provided + image_id = len(self.images) + metadata['id'] = image_id + + return metadata def update_image(self, image_id, metadata, data): - return self.update_response + for i, image in enumerate(self.images): + if image['id'] == image_id: + self.images[i].update(metadata) + return self.images[i] + raise exception.ImageNotFound(image_id=image_id) + + def delete_image(self, image_id): + for i, image in enumerate(self.images): + if image['id'] == image_id: + del self.images[i] + return + raise exception.ImageNotFound(image_id=image_id) class NullWriter(object): @@ -70,230 +91,7 @@ class NullWriter(object): pass -class BaseGlanceTest(unittest.TestCase): - NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" - NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" - NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22) - - def setUp(self): - self.client = StubGlanceClient(None) - self.service = glance.GlanceImageService(client=self.client) - self.context = context.RequestContext(None, None) - - def assertDateTimesFilled(self, image_meta): - self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) - self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) - self.assertEqual(image_meta['deleted_at'], self.NOW_DATETIME) - - def assertDateTimesEmpty(self, image_meta): - self.assertEqual(image_meta['updated_at'], None) - self.assertEqual(image_meta['deleted_at'], None) - - def assertDateTimesBlank(self, image_meta): - self.assertEqual(image_meta['updated_at'], '') - self.assertEqual(image_meta['deleted_at'], '') - - -class TestGlanceImageServiceProperties(BaseGlanceTest): - """ - Ensure attributes which aren't base attributes are ignored. 
- - Missingattributes should be added as None - - """ - - def test_show_passes_through_to_client(self): - fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, - 'foo': 'bar', - 'properties': {'prop1': 'propvalue1'}}} - self.client.images = fixtures - image_meta = self.service.show(self.context, 'image1') - - expected = {'id': '1', 'name': 'image1', 'is_public': True, - 'size': None, 'location': None, 'disk_format': None, - 'container_format': None, 'checksum': None, - 'created_at': None, 'updated_at': None, - 'deleted_at': None, 'deleted': None, 'status': None, - 'properties': {'prop1': 'propvalue1'}} - self.assertEqual(image_meta, expected) - - def test_show_raises_when_no_authtoken_in_the_context(self): - fixtures = {'image1': {'name': 'image1', 'is_public': False, - 'foo': 'bar', - 'properties': {'prop1': 'propvalue1'}}} - self.client.images = fixtures - self.context.auth_token = False - - expected = {'name': 'image1', 'is_public': True, - 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} - self.assertRaises(exception.ImageNotFound, - self.service.show, self.context, 'image1') - - def test_show_passes_through_to_client_with_authtoken_in_context(self): - fixtures = {'image1': {'name': 'image1', 'is_public': False, - 'foo': 'bar', - 'properties': {'prop1': 'propvalue1'}}} - self.client.images = fixtures - self.context.auth_token = True - - expected = {'name': 'image1', 'is_public': False, - 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} - - image_meta = self.service.show(self.context, 'image1') - self.assertEqual(image_meta, expected) - - def test_detail_passes_through_to_client(self): - fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, - 'foo': 'bar', - 'properties': {'prop1': 'propvalue1'}}} - self.client.images = fixtures - image_meta = self.service.detail(self.context) - expected = [{'id': '1', 'name': 'image1', 'is_public': True, - 'size': None, 'location': None, 'disk_format': None, - 'container_format': None, 'checksum': None, - 'created_at': None, 'updated_at': None, - 'deleted_at': None, 'deleted': None, 'status': None, - 'properties': {'prop1': 'propvalue1'}}] - self.assertEqual(image_meta, expected) - - -class TestGetterDateTimeNoneTests(BaseGlanceTest): - - def test_show_handles_none_datetimes(self): - self.client.images = self._make_none_datetime_fixtures() - image_meta = self.service.show(self.context, 'image1') - self.assertDateTimesEmpty(image_meta) - - def test_show_handles_blank_datetimes(self): - self.client.images = self._make_blank_datetime_fixtures() - image_meta = self.service.show(self.context, 'image1') - self.assertDateTimesBlank(image_meta) - - def test_detail_handles_none_datetimes(self): - self.client.images = self._make_none_datetime_fixtures() - image_meta = self.service.detail(self.context)[0] - self.assertDateTimesEmpty(image_meta) - - def test_detail_handles_blank_datetimes(self): - self.client.images = self._make_blank_datetime_fixtures() - image_meta = self.service.detail(self.context)[0] - self.assertDateTimesBlank(image_meta) - - def test_get_handles_none_datetimes(self): - self.client.images = self._make_none_datetime_fixtures() - writer = NullWriter() - image_meta = self.service.get(self.context, 'image1', writer) - self.assertDateTimesEmpty(image_meta) - - def test_get_handles_blank_datetimes(self): - self.client.images = self._make_blank_datetime_fixtures() - writer = NullWriter() - image_meta = self.service.get(self.context, 'image1', writer) - self.assertDateTimesBlank(image_meta) - - def 
test_show_makes_datetimes(self): - self.client.images = self._make_datetime_fixtures() - image_meta = self.service.show(self.context, 'image1') - self.assertDateTimesFilled(image_meta) - image_meta = self.service.show(self.context, 'image2') - self.assertDateTimesFilled(image_meta) - - def test_detail_makes_datetimes(self): - self.client.images = self._make_datetime_fixtures() - image_meta = self.service.detail(self.context)[0] - self.assertDateTimesFilled(image_meta) - image_meta = self.service.detail(self.context)[1] - self.assertDateTimesFilled(image_meta) - - def test_get_makes_datetimes(self): - self.client.images = self._make_datetime_fixtures() - writer = NullWriter() - image_meta = self.service.get(self.context, 'image1', writer) - self.assertDateTimesFilled(image_meta) - image_meta = self.service.get(self.context, 'image2', writer) - self.assertDateTimesFilled(image_meta) - - def _make_datetime_fixtures(self): - fixtures = { - 'image1': { - 'id': '1', - 'name': 'image1', - 'is_public': True, - 'created_at': self.NOW_GLANCE_FORMAT, - 'updated_at': self.NOW_GLANCE_FORMAT, - 'deleted_at': self.NOW_GLANCE_FORMAT, - }, - 'image2': { - 'id': '2', - 'name': 'image2', - 'is_public': True, - 'created_at': self.NOW_GLANCE_OLD_FORMAT, - 'updated_at': self.NOW_GLANCE_OLD_FORMAT, - 'deleted_at': self.NOW_GLANCE_OLD_FORMAT, - }, - } - return fixtures - - def _make_none_datetime_fixtures(self): - fixtures = {'image1': {'id': '1', - 'name': 'image1', - 'is_public': True, - 'updated_at': None, - 'deleted_at': None}} - return fixtures - - def _make_blank_datetime_fixtures(self): - fixtures = {'image1': {'id': '1', - 'name': 'image1', - 'is_public': True, - 'updated_at': '', - 'deleted_at': ''}} - return fixtures - - -class TestMutatorDateTimeTests(BaseGlanceTest): - """Tests create(), update()""" - - def test_create_handles_datetimes(self): - self.client.add_response = self._make_datetime_fixture() - image_meta = self.service.create(self.context, {}) - self.assertDateTimesFilled(image_meta) - - def test_create_handles_none_datetimes(self): - self.client.add_response = self._make_none_datetime_fixture() - dummy_meta = {} - image_meta = self.service.create(self.context, dummy_meta) - self.assertDateTimesEmpty(image_meta) - - def test_update_handles_datetimes(self): - self.client.images = {'image1': self._make_datetime_fixture()} - self.client.update_response = self._make_datetime_fixture() - dummy_meta = {} - image_meta = self.service.update(self.context, 'image1', dummy_meta) - self.assertDateTimesFilled(image_meta) - - def test_update_handles_none_datetimes(self): - self.client.images = {'image1': self._make_datetime_fixture()} - self.client.update_response = self._make_none_datetime_fixture() - dummy_meta = {} - image_meta = self.service.update(self.context, 'image1', dummy_meta) - self.assertDateTimesEmpty(image_meta) - - def _make_datetime_fixture(self): - fixture = {'id': 'image1', 'name': 'image1', 'is_public': True, - 'created_at': self.NOW_GLANCE_FORMAT, - 'updated_at': self.NOW_GLANCE_FORMAT, - 'deleted_at': self.NOW_GLANCE_FORMAT} - return fixture - - def _make_none_datetime_fixture(self): - fixture = {'id': 'image1', 'name': 'image1', 'is_public': True, - 'updated_at': None, - 'deleted_at': None} - return fixture - - -class TestGlanceSerializer(unittest.TestCase): +class TestGlanceSerializer(test.TestCase): def test_serialize(self): metadata = {'name': 'image1', 'is_public': True, @@ -329,7 +127,7 @@ class TestGlanceSerializer(unittest.TestCase): 
self.assertEqual(glance._convert_from_string(converted), metadata) -class GlanceImageServiceTest(test.TestCase): +class TestGlanceImageService(test.TestCase): """ Tests the Glance image service. @@ -342,34 +140,74 @@ class GlanceImageServiceTest(test.TestCase): APIs (OpenStack, EC2) """ - def __init__(self, *args, **kwargs): - super(GlanceImageServiceTest, self).__init__(*args, **kwargs) - self.service = None - self.context = None - - @staticmethod - def _make_fixture(name): - fixture = {'name': name, - 'properties': {'one': 'two'}, - 'status': None, - 'is_public': True} - return fixture + NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" + NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" + NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22) def setUp(self): - super(GlanceImageServiceTest, self).setUp() + super(TestGlanceImageService, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.stub_out_glance(self.stubs) fakes.stub_out_compute_api_snapshot(self.stubs) - service_class = 'nova.image.glance.GlanceImageService' - self.service = utils.import_object(service_class) - self.context = context.RequestContext('fake', 'fake') + self.client = StubGlanceClient() + self.service = glance.GlanceImageService(client=self.client) + self.context = context.RequestContext('fake', 'fake', auth_token=True) self.service.delete_all() - self.sent_to_glance = {} - fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance) def tearDown(self): self.stubs.UnsetAll() - super(GlanceImageServiceTest, self).tearDown() + super(TestGlanceImageService, self).tearDown() + + + @staticmethod + def _make_fixture(**kwargs): + fixture = {'name': None, + 'properties': {}, + 'status': None, + 'is_public': None} + fixture.update(kwargs) + return fixture + + def _make_datetime_fixtures(self): + return [ + { + 'id': '1', + 'name': 'image1', + 'is_public': True, + 'created_at': self.NOW_GLANCE_FORMAT, + 'updated_at': self.NOW_GLANCE_FORMAT, + 'deleted_at': self.NOW_GLANCE_FORMAT, + }, + { + 'id': '2', + 'name': 'image2', + 'is_public': True, + 'created_at': self.NOW_GLANCE_OLD_FORMAT, + 'updated_at': self.NOW_GLANCE_OLD_FORMAT, + 'deleted_at': self.NOW_GLANCE_OLD_FORMAT, + }, + ] + + def _make_datetime_fixture(self): + return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT, + updated_at=self.NOW_GLANCE_FORMAT, + deleted_at=self.NOW_GLANCE_FORMAT) + + def _make_none_datetime_fixture(self): + return self._make_fixture(updated_at=None, deleted_at=None) + + def assertDateTimesFilled(self, image_meta): + self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) + self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) + self.assertEqual(image_meta['deleted_at'], self.NOW_DATETIME) + + def assertDateTimesEmpty(self, image_meta): + self.assertEqual(image_meta['updated_at'], None) + self.assertEqual(image_meta['deleted_at'], None) + + def assertDateTimesBlank(self, image_meta): + self.assertEqual(image_meta['updated_at'], '') + self.assertEqual(image_meta['deleted_at'], '') def test_create_with_instance_id(self): """Ensure instance_id is persisted as an image-property""" @@ -378,8 +216,6 @@ class GlanceImageServiceTest(test.TestCase): 'properties': {'instance_id': '42', 'user_id': 'fake'}} image_id = self.service.create(self.context, fixture)['id'] - expected = fixture - self.assertDictMatch(self.sent_to_glance['metadata'], expected) image_meta = self.service.show(self.context, image_id) expected = {'id': image_id, 'name': 'test image', 'is_public': False, @@ -399,16 +235,21 @@ class 
GlanceImageServiceTest(test.TestCase): instance_id. Public images are an example of an image not tied to an instance. """ - fixture = {'name': 'test image'} + fixture = {'name': 'test image', 'is_public': False} image_id = self.service.create(self.context, fixture)['id'] - expected = {'name': 'test image'} - self.assertDictMatch(self.sent_to_glance['metadata'], expected) + expected = {'id': image_id, 'name': 'test image', 'is_public': False, + 'size': None, 'location': None, 'disk_format': None, + 'container_format': None, 'checksum': None, + 'created_at': None, 'updated_at': None, + 'deleted_at': None, 'deleted': None, 'status': None, + 'properties': {}} + actual = self.service.show(self.context, image_id) + self.assertDictMatch(actual, expected) def test_create(self): - fixture = self._make_fixture('test image') + fixture = self._make_fixture(name='test image') num_images = len(self.service.index(self.context)) - image_id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, image_id) @@ -416,9 +257,7 @@ class GlanceImageServiceTest(test.TestCase): len(self.service.index(self.context))) def test_create_and_show_non_existing_image(self): - fixture = self._make_fixture('test image') - num_images = len(self.service.index(self.context)) - + fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, image_id) @@ -428,9 +267,7 @@ class GlanceImageServiceTest(test.TestCase): 'bad image id') def test_create_and_show_non_existing_image_by_name(self): - fixture = self._make_fixture('test image') - num_images = len(self.service.index(self.context)) - + fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] self.assertNotEquals(None, image_id) @@ -440,17 +277,17 @@ class GlanceImageServiceTest(test.TestCase): 'bad image id') def test_index(self): - fixture = self._make_fixture('test image') + fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] image_metas = self.service.index(self.context) - expected = [{'id': 'DONTCARE', 'name': 'test image'}] + expected = [{'id': image_id, 'name': 'test image'}] self.assertDictListMatch(image_metas, expected) def test_index_default_limit(self): fixtures = [] ids = [] for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) + fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) @@ -466,7 +303,7 @@ class GlanceImageServiceTest(test.TestCase): fixtures = [] ids = [] for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) + fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) @@ -483,18 +320,18 @@ class GlanceImageServiceTest(test.TestCase): fixtures = [] ids = [] for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) + fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) - image_metas = self.service.index(self.context, limit=3) - self.assertEquals(len(image_metas), 3) + image_metas = self.service.index(self.context, limit=5) + self.assertEquals(len(image_metas), 5) def test_index_marker_and_limit(self): fixtures = [] ids = [] for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) + fixture = self._make_fixture(name='TestImage %d' % 
(i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) @@ -502,7 +339,7 @@ class GlanceImageServiceTest(test.TestCase): self.assertEquals(len(image_metas), 1) i = 4 for meta in image_metas: - expected = {'id': 'DONTCARE', + expected = {'id': ids[i], 'name': 'TestImage %d' % (i)} self.assertDictMatch(meta, expected) i = i + 1 @@ -511,7 +348,7 @@ class GlanceImageServiceTest(test.TestCase): fixtures = [] ids = [] for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) + fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) @@ -520,11 +357,11 @@ class GlanceImageServiceTest(test.TestCase): i = 2 for meta in image_metas: expected = { - 'id': 'DONTCARE', + 'id': ids[i], 'status': None, - 'is_public': True, + 'is_public': None, 'name': 'TestImage %d' % (i), - 'properties': {'one': 'two'}, + 'properties': {}, 'size': None, 'location': None, 'disk_format': None, @@ -536,8 +373,6 @@ class GlanceImageServiceTest(test.TestCase): 'deleted': None } - print meta - print expected self.assertDictMatch(meta, expected) i = i + 1 @@ -545,31 +380,31 @@ class GlanceImageServiceTest(test.TestCase): fixtures = [] ids = [] for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) + fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) - image_metas = self.service.detail(self.context, limit=3) - self.assertEquals(len(image_metas), 3) + image_metas = self.service.detail(self.context, limit=5) + self.assertEquals(len(image_metas), 5) def test_detail_marker_and_limit(self): fixtures = [] ids = [] for i in range(10): - fixture = self._make_fixture('TestImage %d' % (i)) + fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) - image_metas = self.service.detail(self.context, marker=ids[3], limit=3) - self.assertEquals(len(image_metas), 3) + image_metas = self.service.detail(self.context, marker=ids[3], limit=5) + self.assertEquals(len(image_metas), 5) i = 4 for meta in image_metas: expected = { - 'id': 'DONTCARE', + 'id': ids[i], 'status': None, - 'is_public': True, + 'is_public': None, 'name': 'TestImage %d' % (i), - 'properties': {'one': 'two'}, + 'properties': {}, 'size': None, 'location': None, 'disk_format': None, @@ -584,18 +419,17 @@ class GlanceImageServiceTest(test.TestCase): i = i + 1 def test_update(self): - fixture = self._make_fixture('test image') + fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] - fixture['status'] = 'in progress' - + fixture['name'] = 'new image name' self.service.update(self.context, image_id, fixture) new_image_data = self.service.show(self.context, image_id) - self.assertEquals('in progress', new_image_data['status']) + self.assertEquals('new image name', new_image_data['name']) def test_delete(self): - fixture1 = self._make_fixture('test image 1') - fixture2 = self._make_fixture('test image 2') + fixture1 = self._make_fixture(name='test image 1') + fixture2 = self._make_fixture(name='test image 2') fixtures = [fixture1, fixture2] num_images = len(self.service.index(self.context)) @@ -613,3 +447,118 @@ class GlanceImageServiceTest(test.TestCase): num_images = len(self.service.index(self.context)) self.assertEquals(1, num_images) + + def test_show_passes_through_to_client(self): + fixture = 
self._make_fixture(name='image1', is_public=True) + image_id = self.service.create(self.context, fixture)['id'] + + image_meta = self.service.show(self.context, image_id) + expected = {'id': image_id, 'name': 'image1', 'is_public': True, + 'size': None, 'location': None, 'disk_format': None, + 'container_format': None, 'checksum': None, + 'created_at': None, 'updated_at': None, + 'deleted_at': None, 'deleted': None, 'status': None, + 'properties': {}} + self.assertEqual(image_meta, expected) + + def test_show_raises_when_no_authtoken_in_the_context(self): + fixture = self._make_fixture(name='image1', + is_public=False, + properties={'one': 'two'}) + image_id = self.service.create(self.context, fixture)['id'] + self.context.auth_token = False + self.assertRaises(exception.ImageNotFound, + self.service.show, + self.context, + image_id) + + def test_detail_passes_through_to_client(self): + fixture = self._make_fixture(name='image10', is_public=True) + image_id = self.service.create(self.context, fixture)['id'] + image_metas = self.service.detail(self.context) + expected = [{'id': image_id, 'name': 'image10', 'is_public': True, + 'size': None, 'location': None, 'disk_format': None, + 'container_format': None, 'checksum': None, + 'created_at': None, 'updated_at': None, + 'deleted_at': None, 'deleted': None, 'status': None, + 'properties': {}}] + self.assertEqual(image_metas, expected) + + def test_show_handles_none_datetimes(self): + fixture = self._make_fixture(updated_at=None, deleted_at=None) + image_id = self.service.create(self.context, fixture)['id'] + image_meta = self.service.show(self.context, image_id) + self.assertDateTimesEmpty(image_meta) + + def test_show_handles_blank_datetimes(self): + fixture = self._make_fixture(updated_at='', deleted_at='') + image_id = self.service.create(self.context, fixture)['id'] + image_meta = self.service.show(self.context, image_id) + self.assertDateTimesBlank(image_meta) + + def test_detail_handles_none_datetimes(self): + fixture = self._make_fixture(updated_at=None, deleted_at=None) + self.service.create(self.context, fixture) + image_meta = self.service.detail(self.context)[0] + self.assertDateTimesEmpty(image_meta) + + def test_detail_handles_blank_datetimes(self): + fixture = self._make_fixture(updated_at='', deleted_at='') + self.service.create(self.context, fixture) + image_meta = self.service.detail(self.context)[0] + self.assertDateTimesBlank(image_meta) + + def test_get_handles_none_datetimes(self): + fixture = self._make_fixture(updated_at=None, deleted_at=None) + image_id = self.service.create(self.context, fixture)['id'] + writer = NullWriter() + image_meta = self.service.get(self.context, image_id, writer) + self.assertDateTimesEmpty(image_meta) + + def test_get_handles_blank_datetimes(self): + fixture = self._make_fixture(updated_at='', deleted_at='') + image_id = self.service.create(self.context, fixture)['id'] + writer = NullWriter() + image_meta = self.service.get(self.context, image_id, writer) + self.assertDateTimesBlank(image_meta) + + def test_show_makes_datetimes(self): + fixture = self._make_datetime_fixture() + image_id = self.service.create(self.context, fixture)['id'] + image_meta = self.service.show(self.context, image_id) + self.assertDateTimesFilled(image_meta) + + def test_detail_makes_datetimes(self): + fixture = self._make_datetime_fixture() + self.service.create(self.context, fixture) + image_meta = self.service.detail(self.context)[0] + self.assertDateTimesFilled(image_meta) + + def test_get_makes_datetimes(self): + 
fixture = self._make_datetime_fixture() + image_id = self.service.create(self.context, fixture)['id'] + writer = NullWriter() + image_meta = self.service.get(self.context, image_id, writer) + self.assertDateTimesFilled(image_meta) + + def test_create_handles_datetimes(self): + fixture = self._make_datetime_fixture() + image_meta = self.service.create(self.context, fixture) + self.assertDateTimesFilled(image_meta) + + def test_create_handles_none_datetimes(self): + fixture = self._make_none_datetime_fixture() + image_meta = self.service.create(self.context, fixture) + self.assertDateTimesEmpty(image_meta) + + def test_update_handles_datetimes(self): + fixture = self._make_datetime_fixture() + image_id = self.service.create(self.context, fixture)['id'] + image_meta = self.service.update(self.context, image_id, {}) + self.assertDateTimesFilled(image_meta) + + def test_update_handles_none_datetimes(self): + fixture = self._make_none_datetime_fixture() + image_id = self.service.create(self.context, fixture)['id'] + image_meta = self.service.update(self.context, image_id, {}) + self.assertDateTimesEmpty(image_meta) -- cgit From 35e0ae794f6cd5fda47c4795da34f9f57f52614f Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 6 Sep 2011 15:16:40 -0400 Subject: further cleanup --- nova/api/openstack/image_metadata.py | 58 +++-- nova/image/glance.py | 44 ++-- nova/tests/api/openstack/fakes.py | 125 +++------- nova/tests/api/openstack/test_image_metadata.py | 163 +++++------- nova/tests/api/openstack/test_images.py | 313 ++++++++++++------------ nova/tests/glance/stubs.py | 69 ++++++ nova/tests/image/test_glance.py | 269 ++++++-------------- nova/tests/test_xenapi.py | 2 +- 8 files changed, 461 insertions(+), 582 deletions(-) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 4d615ea96..adb6bee4b 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -17,6 +17,7 @@ from webob import exc +from nova import exception from nova import flags from nova import image from nova import utils @@ -33,21 +34,22 @@ class Controller(object): def __init__(self): self.image_service = image.get_default_image_service() - def _get_metadata(self, context, image_id, image=None): - if not image: - image = self.image_service.show(context, image_id) - metadata = image.get('properties', {}) - return metadata + def _get_image(self, context, image_id): + try: + return self.image_service.show(context, image_id) + except exception.NotFound: + msg = _("Image not found.") + raise exc.HTTPNotFound(explanation=msg) def index(self, req, image_id): """Returns the list of metadata for a given instance""" context = req.environ['nova.context'] - metadata = self._get_metadata(context, image_id) + metadata = self._get_image(context, image_id)['properties'] return dict(metadata=metadata) def show(self, req, image_id, id): context = req.environ['nova.context'] - metadata = self._get_metadata(context, image_id) + metadata = self._get_image(context, image_id)['properties'] if id in metadata: return {'meta': {id: metadata[id]}} else: @@ -55,15 +57,13 @@ class Controller(object): def create(self, req, image_id, body): context = req.environ['nova.context'] - img = self.image_service.show(context, image_id) - metadata = self._get_metadata(context, image_id, img) + image = self._get_image(context, image_id) if 'metadata' in body: for key, value in body['metadata'].iteritems(): - metadata[key] = value - common.check_img_metadata_quota_limit(context, metadata) - 
img['properties'] = metadata - self.image_service.update(context, image_id, img, None) - return dict(metadata=metadata) + image['properties'][key] = value + common.check_img_metadata_quota_limit(context, image['properties']) + self.image_service.update(context, image_id, image, None) + return dict(metadata=image['properties']) def update(self, req, image_id, id, body): context = req.environ['nova.context'] @@ -80,32 +80,30 @@ class Controller(object): if len(meta) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) - img = self.image_service.show(context, image_id) - metadata = self._get_metadata(context, image_id, img) - metadata[id] = meta[id] - common.check_img_metadata_quota_limit(context, metadata) - img['properties'] = metadata - self.image_service.update(context, image_id, img, None) + + image = self._get_image(context, image_id) + image['properties'][id] = meta[id] + common.check_img_metadata_quota_limit(context, image['properties']) + self.image_service.update(context, image_id, image, None) return dict(meta=meta) def update_all(self, req, image_id, body): context = req.environ['nova.context'] - img = self.image_service.show(context, image_id) + image = self._get_image(context, image_id) metadata = body.get('metadata', {}) common.check_img_metadata_quota_limit(context, metadata) - img['properties'] = metadata - self.image_service.update(context, image_id, img, None) + image['properties'] = metadata + self.image_service.update(context, image_id, image, None) return dict(metadata=metadata) def delete(self, req, image_id, id): context = req.environ['nova.context'] - img = self.image_service.show(context, image_id) - metadata = self._get_metadata(context, image_id) - if not id in metadata: - raise exc.HTTPNotFound() - metadata.pop(id) - img['properties'] = metadata - self.image_service.update(context, image_id, img, None) + image = self._get_image(context, image_id) + if not id in image['properties']: + msg = _("Invalid metadata key") + raise exc.HTTPNotFound(explanation=msg) + image['properties'].pop(id) + self.image_service.update(context, image_id, image, None) def create_resource(): diff --git a/nova/image/glance.py b/nova/image/glance.py index 8920a5d8a..b73a95276 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -104,7 +104,7 @@ class GlanceImageService(object): images = [] for image_meta in image_metas: if self._is_image_available(context, image_meta): - base_image_meta = self._translate_to_base(image_meta) + base_image_meta = self._translate_from_glance(image_meta) images.append(base_image_meta) return images @@ -169,7 +169,7 @@ class GlanceImageService(object): if not self._is_image_available(context, image_meta): raise exception.ImageNotFound(image_id=image_id) - base_image_meta = self._translate_to_base(image_meta) + base_image_meta = self._translate_from_glance(image_meta) return base_image_meta def show_by_name(self, context, name): @@ -193,7 +193,7 @@ class GlanceImageService(object): for chunk in image_chunks: data.write(chunk) - base_image_meta = self._translate_to_base(image_meta) + base_image_meta = self._translate_from_glance(image_meta) return base_image_meta def create(self, context, image_meta, data=None): @@ -206,7 +206,7 @@ class GlanceImageService(object): # Translate Base -> Service LOG.debug(_('Creating image in Glance. 
Metadata passed in %s'), image_meta) - sent_service_image_meta = self._translate_to_service(image_meta) + sent_service_image_meta = self._translate_to_glance(image_meta) LOG.debug(_('Metadata after formatting for Glance %s'), sent_service_image_meta) @@ -214,7 +214,7 @@ class GlanceImageService(object): sent_service_image_meta, data) # Translate Service -> Base - base_image_meta = self._translate_to_base(recv_service_image_meta) + base_image_meta = self._translate_from_glance(recv_service_image_meta) LOG.debug(_('Metadata returned from Glance formatted for Base %s'), base_image_meta) return base_image_meta @@ -228,13 +228,13 @@ class GlanceImageService(object): self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) - image_meta = _convert_to_string(image_meta) + image_meta = self._translate_to_glance(image_meta) try: image_meta = self.client.update_image(image_id, image_meta, data) except glance_exception.NotFound: raise exception.ImageNotFound(image_id=image_id) - base_image_meta = self._translate_to_base(image_meta) + base_image_meta = self._translate_from_glance(image_meta) return base_image_meta def delete(self, context, image_id): @@ -257,13 +257,13 @@ class GlanceImageService(object): pass @classmethod - def _translate_to_service(cls, image_meta): + def _translate_to_glance(cls, image_meta): image_meta = _convert_to_string(image_meta) + image_meta = _remove_read_only(image_meta) return image_meta @classmethod - def _translate_to_base(cls, image_meta): - """Override translation to handle conversion to datetime objects.""" + def _translate_from_glance(cls, image_meta): image_meta = _limit_attributes(image_meta) image_meta = _convert_timestamps_to_datetimes(image_meta) image_meta = _convert_from_string(image_meta) @@ -277,20 +277,7 @@ class GlanceImageService(object): an auth_token. 
""" - if hasattr(context, 'auth_token') and context.auth_token: - return True - - properties = image_meta.get('properties', {}) - - if context.project_id and ('project_id' in properties): - return str(properties['project_id']) == str(context.project_id) - - try: - user_id = properties['user_id'] - except KeyError: - return False - - return str(user_id) == str(context.user_id) + return hasattr(context, 'auth_token') and context.auth_token # utility functions @@ -365,3 +352,12 @@ def _limit_attributes(image_meta): output['properties'] = image_meta.get('properties', {}) return output + + +def _remove_read_only(image_meta): + IMAGE_ATTRIBUTES = ['updated_at', 'created_at', 'deleted_at'] + output = copy.deepcopy(image_meta) + for attr in IMAGE_ATTRIBUTES: + if attr in output: + del output[attr] + return output diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 44681d395..9faeeaa8a 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -42,6 +42,7 @@ import nova.image.fake from nova.image import glance from nova.image import service from nova.tests import fake_flags +from nova.tests.glance import stubs as glance_stubs class Context(object): @@ -83,7 +84,7 @@ def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True, if fake_auth_context is not None: ctxt = fake_auth_context else: - ctxt = context.RequestContext('fake', 'fake') + ctxt = context.RequestContext('fake', 'fake', auth_token=True) api10 = openstack.FaultWrapper(api_auth.InjectContext(ctxt, limits.RateLimitingMiddleware(inner_app10))) api11 = openstack.FaultWrapper(api_auth.InjectContext(ctxt, @@ -177,6 +178,38 @@ def stub_out_compute_api_backup(stubs): stubs.Set(nova.compute.API, 'backup', backup) +def _make_image_fixtures(): + NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" + + image_id = 123 + base_attrs = {'deleted': False} + + fixtures = [] + + def add_fixture(**kwargs): + kwargs.update(base_attrs) + fixtures.append(kwargs) + + # Public image + add_fixture(id=image_id, name='public image', is_public=True, + status='active', properties={'key1': 'value1'}) + image_id += 1 + + # Snapshot for User 1 + server_ref = 'http://localhost/v1.1/servers/42' + snapshot_properties = {'instance_ref': server_ref, 'user_id': 'fake'} + for status in ('queued', 'saving', 'active', 'killed'): + add_fixture(id=image_id, name='%s snapshot' % status, + is_public=False, status=status, + properties=snapshot_properties) + image_id += 1 + + # Image without a name + add_fixture(id=image_id, is_public=True, status='active', properties={}) + + return fixtures + + def stub_out_glance_add_image(stubs, sent_to_glance): """ We return the metadata sent to glance by modifying the sent_to_glance dict @@ -192,91 +225,11 @@ def stub_out_glance_add_image(stubs, sent_to_glance): stubs.Set(glance_client.Client, 'add_image', fake_add_image) -def stub_out_glance(stubs, initial_fixtures=None): - - class FakeGlanceClient: - - def __init__(self, initial_fixtures): - self.fixtures = initial_fixtures or [] - - def _filter_images(self, filters=None, marker=None, limit=None): - found = True - if marker: - found = False - if limit == 0: - limit = None - - fixtures = [] - count = 0 - for f in self.fixtures: - if limit and count >= limit: - break - if found: - fixtures.append(f) - count = count + 1 - if f['id'] == marker: - found = True - - return fixtures - - def fake_get_images(self, filters=None, marker=None, limit=None): - fixtures = self._filter_images(filters, marker, limit) - return [dict(id=f['id'], 
name=f['name']) - for f in fixtures] - - def fake_get_images_detailed(self, filters=None, - marker=None, limit=None): - return self._filter_images(filters, marker, limit) - - def fake_get_image_meta(self, image_id): - image = self._find_image(image_id) - if image: - return copy.deepcopy(image) - raise glance_exc.NotFound - - def fake_add_image(self, image_meta, data=None): - image_meta = copy.deepcopy(image_meta) - image_id = ''.join(random.choice(string.letters) - for _ in range(20)) - image_meta['id'] = image_id - self.fixtures.append(image_meta) - return copy.deepcopy(image_meta) - - def fake_update_image(self, image_id, image_meta, data=None): - for attr in ('created_at', 'updated_at', 'deleted_at', 'deleted'): - if attr in image_meta: - del image_meta[attr] - - f = self._find_image(image_id) - if not f: - raise glance_exc.NotFound - - f.update(image_meta) - return copy.deepcopy(f) - - def fake_delete_image(self, image_id): - f = self._find_image(image_id) - if not f: - raise glance_exc.NotFound - - self.fixtures.remove(f) - - def _find_image(self, image_id): - for f in self.fixtures: - if str(f['id']) == str(image_id): - return f - return None - - GlanceClient = glance_client.Client - fake = FakeGlanceClient(initial_fixtures) - - stubs.Set(GlanceClient, 'get_images', fake.fake_get_images) - stubs.Set(GlanceClient, 'get_images_detailed', - fake.fake_get_images_detailed) - stubs.Set(GlanceClient, 'get_image_meta', fake.fake_get_image_meta) - stubs.Set(GlanceClient, 'add_image', fake.fake_add_image) - stubs.Set(GlanceClient, 'update_image', fake.fake_update_image) - stubs.Set(GlanceClient, 'delete_image', fake.fake_delete_image) +def stub_out_glance(stubs): + def fake_get_image_service(): + client = glance_stubs.StubGlanceClient(_make_image_fixtures()) + return nova.image.glance.GlanceImageService(client) + stubs.Set(nova.image, 'get_default_image_service', fake_get_image_service) class FakeToken(object): diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index fe42e35e5..314c3c38e 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -23,7 +23,6 @@ from nova import flags from nova.api import openstack from nova import test from nova.tests.api.openstack import fakes -import nova.wsgi FLAGS = flags.FLAGS @@ -31,76 +30,20 @@ FLAGS = flags.FLAGS class ImageMetaDataTest(test.TestCase): - IMAGE_FIXTURES = [ - {'status': 'active', - 'name': 'image1', - 'deleted': False, - 'container_format': None, - 'checksum': None, - 'created_at': '2011-03-22T17:40:15', - 'disk_format': None, - 'updated_at': '2011-03-22T17:40:15', - 'id': '1', - 'location': 'file:///var/lib/glance/images/1', - 'is_public': True, - 'deleted_at': None, - 'properties': { - 'key1': 'value1', - 'key2': 'value2'}, - 'size': 5882349}, - {'status': 'active', - 'name': 'image2', - 'deleted': False, - 'container_format': None, - 'checksum': None, - 'created_at': '2011-03-22T17:40:15', - 'disk_format': None, - 'updated_at': '2011-03-22T17:40:15', - 'id': '2', - 'location': 'file:///var/lib/glance/images/2', - 'is_public': True, - 'deleted_at': None, - 'properties': { - 'key1': 'value1', - 'key2': 'value2'}, - 'size': 5882349}, - {'status': 'active', - 'name': 'image3', - 'deleted': False, - 'container_format': None, - 'checksum': None, - 'created_at': '2011-03-22T17:40:15', - 'disk_format': None, - 'updated_at': '2011-03-22T17:40:15', - 'id': '3', - 'location': 'file:///var/lib/glance/images/2', - 'is_public': 
True, - 'deleted_at': None, - 'properties': {}, - 'size': 5882349}, - ] - def setUp(self): super(ImageMetaDataTest, self).setUp() - self.flags(image_service='nova.image.glance.GlanceImageService') - # NOTE(dprince) max out properties/metadata in image 3 for testing - img3 = self.IMAGE_FIXTURES[2] - for num in range(FLAGS.quota_metadata_items): - img3['properties']['key%i' % num] = "blah" - fakes.stub_out_glance(self.stubs, self.IMAGE_FIXTURES) + fakes.stub_out_glance(self.stubs) def test_index(self): - req = webob.Request.blank('/v1.1/123/images/1/metadata') + req = webob.Request.blank('/v1.1/123/images/123/metadata') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) - expected = self.IMAGE_FIXTURES[0]['properties'] - self.assertEqual(len(expected), len(res_dict['metadata'])) - for (key, value) in res_dict['metadata'].items(): - self.assertEqual(value, res_dict['metadata'][key]) + expected = {'metadata': {'key1': 'value1'}} + self.assertEqual(res_dict, expected) def test_show(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata/key1') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) @@ -109,32 +52,38 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual('value1', res_dict['meta']['key1']) def test_show_not_found(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata/key9') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/key9') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + + def test_show_image_not_found(self): + req = webob.Request.blank('/v1.1/fake/images/100/metadata/key1') res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) def test_create(self): - req = webob.Request.blank('/v1.1/fake/images/2/metadata') + req = webob.Request.blank('/v1.1/fake/images/123/metadata') req.method = 'POST' - req.body = '{"metadata": {"key9": "value9"}}' + req.body = '{"metadata": {"key7": "value7"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) actual_output = json.loads(res.body) + expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}} + self.assertEqual(expected_output, actual_output) - expected_output = { - 'metadata': { - 'key1': 'value1', - 'key2': 'value2', - 'key9': 'value9', - }, - } + def test_create_image_not_found(self): + req = webob.Request.blank('/v1.1/fake/images/100/metadata') + req.method = 'POST' + req.body = '{"metadata": {"key7": "value7"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) - self.assertEqual(expected_output, actual_output) + self.assertEqual(404, res.status_int) def test_update_all(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata') + req = webob.Request.blank('/v1.1/fake/images/123/metadata') req.method = 'PUT' req.body = '{"metadata": {"key9": "value9"}}' req.headers["content-type"] = "application/json" @@ -142,17 +91,20 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(200, res.status_int) actual_output = json.loads(res.body) + expected_output = {'metadata': {'key9': 'value9'}} + self.assertEqual(expected_output, actual_output) - expected_output = { - 'metadata': { - 'key9': 'value9', - }, - } + def test_update_all_image_not_found(self): + req = webob.Request.blank('/v1.1/fake/images/100/metadata') + req.method = 
'PUT' + req.body = '{"metadata": {"key9": "value9"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) - self.assertEqual(expected_output, actual_output) + self.assertEqual(404, res.status_int) def test_update_item(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata/key1') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1') req.method = 'PUT' req.body = '{"meta": {"key1": "zz"}}' req.headers["content-type"] = "application/json" @@ -160,15 +112,20 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(200, res.status_int) actual_output = json.loads(res.body) - expected_output = { - 'meta': { - 'key1': 'zz', - }, - } + expected_output = {'meta': {'key1': 'zz'}} self.assertEqual(actual_output, expected_output) + def test_update_item_image_not_found(self): + req = webob.Request.blank('/v1.1/fake/images/100/metadata/key1') + req.method = 'PUT' + req.body = '{"meta": {"key1": "zz"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(404, res.status_int) + def test_update_item_bad_body(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata/key1') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1') req.method = 'PUT' req.body = '{"key1": "zz"}' req.headers["content-type"] = "application/json" @@ -176,15 +133,18 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_update_item_too_many_keys(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata/key1') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1') req.method = 'PUT' - req.body = '{"meta": {"key1": "value1", "key2": "value2"}}' + overload = {} + for num in range(FLAGS.quota_metadata_items + 1): + overload['key%s' % num] = 'value%s' % num + req.body = json.dumps({'meta': overload}) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_update_item_body_uri_mismatch(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata/bad') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/bad') req.method = 'PUT' req.body = '{"meta": {"key1": "value1"}}' req.headers["content-type"] = "application/json" @@ -192,7 +152,7 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(400, res.status_int) def test_update_item_xml(self): - req = webob.Request.blank('/v1.1/fake/images/1/metadata/key1') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1') req.method = 'PUT' req.body = 'five' req.headers["content-type"] = "application/xml" @@ -200,22 +160,24 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(200, res.status_int) actual_output = json.loads(res.body) - expected_output = { - 'meta': { - 'key1': 'five', - }, - } + expected_output = {'meta': {'key1': 'five'}} self.assertEqual(actual_output, expected_output) def test_delete(self): - req = webob.Request.blank('/v1.1/fake/images/2/metadata/key1') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/key1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(204, res.status_int) self.assertEqual('', res.body) def test_delete_not_found(self): - req = webob.Request.blank('/v1.1/fake/images/2/metadata/blah') + req = webob.Request.blank('/v1.1/fake/images/123/metadata/blah') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + + def test_delete_image_not_found(self): + req = 
webob.Request.blank('/v1.1/fake/images/100/metadata/key1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) @@ -225,7 +187,7 @@ class ImageMetaDataTest(test.TestCase): for num in range(FLAGS.quota_metadata_items + 1): data['metadata']['key%i' % num] = "blah" json_string = str(data).replace("\'", "\"") - req = webob.Request.blank('/v1.1/fake/images/2/metadata') + req = webob.Request.blank('/v1.1/fake/images/123/metadata') req.method = 'POST' req.body = json_string req.headers["content-type"] = "application/json" @@ -233,7 +195,8 @@ class ImageMetaDataTest(test.TestCase): self.assertEqual(413, res.status_int) def test_too_many_metadata_items_on_put(self): - req = webob.Request.blank('/v1.1/fake/images/3/metadata/blah') + FLAGS.quota_metadata_items = 1 + req = webob.Request.blank('/v1.1/fake/images/123/metadata/blah') req.method = 'PUT' req.body = '{"meta": {"blah": "blah"}}' req.headers["content-type"] = "application/json" diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 942bab5c7..298b7d831 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -29,31 +29,30 @@ import stubout import webob from nova import context -from nova import test import nova.api.openstack from nova.api.openstack import images +from nova import test from nova.tests.api.openstack import fakes +NOW_API_FORMAT = "2010-10-11T10:30:22Z" + + class ImagesTest(test.TestCase): """ Test of the OpenStack API /images application controller w/Glance. """ - NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" - NOW_API_FORMAT = "2010-10-11T10:30:22Z" def setUp(self): """Run before each test.""" super(ImagesTest, self).setUp() - self.flags(image_service='nova.image.glance.GlanceImageService') self.stubs = stubout.StubOutForTesting() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) - self.fixtures = self._make_image_fixtures() - fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures) fakes.stub_out_compute_api_snapshot(self.stubs) fakes.stub_out_compute_api_backup(self.stubs) + fakes.stub_out_glance(self.stubs) def tearDown(self): """Run after each test.""" @@ -63,36 +62,30 @@ class ImagesTest(test.TestCase): def _get_fake_context(self): class Context(object): project_id = 'fake' + auth_token = True return Context() - def _applicable_fixture(self, fixture, user_id): - """Determine if this fixture is applicable for given user id.""" - is_public = fixture["is_public"] - try: - uid = fixture["properties"]["user_id"] - except KeyError: - uid = None - return uid == user_id or is_public - def test_get_image_index(self): request = webob.Request.blank('/v1.0/images') - response = request.get_response(fakes.wsgi_app()) + app = fakes.wsgi_app(fake_auth_context=self._get_fake_context()) + response = request.get_response(app) response_dict = json.loads(response.body) response_list = response_dict["images"] - expected = [{'id': 123, 'name': 'public image'}, - {'id': 124, 'name': 'queued snapshot'}, - {'id': 125, 'name': 'saving snapshot'}, - {'id': 126, 'name': 'active snapshot'}, - {'id': 127, 'name': 'killed snapshot'}, - {'id': 129, 'name': None}] + expected = [{'id': '123', 'name': 'public image'}, + {'id': '124', 'name': 'queued snapshot'}, + {'id': '125', 'name': 'saving snapshot'}, + {'id': '126', 'name': 'active snapshot'}, + {'id': '127', 'name': 'killed snapshot'}, + {'id': '128', 'name': None}] 
self.assertDictListMatch(response_list, expected) def test_get_image(self): request = webob.Request.blank('/v1.0/images/123') - response = request.get_response(fakes.wsgi_app()) + app = fakes.wsgi_app(fake_auth_context=self._get_fake_context()) + response = request.get_response(app) self.assertEqual(200, response.status_int) @@ -100,20 +93,21 @@ class ImagesTest(test.TestCase): expected_image = { "image": { - "id": 123, + "id": "123", "name": "public image", - "updated": self.NOW_API_FORMAT, - "created": self.NOW_API_FORMAT, + "updated": NOW_API_FORMAT, + "created": NOW_API_FORMAT, "status": "ACTIVE", "progress": 100, }, } - self.assertEqual(expected_image, actual_image) + self.assertDictMatch(expected_image, actual_image) def test_get_image_v1_1(self): request = webob.Request.blank('/v1.1/fake/images/124') - response = request.get_response(fakes.wsgi_app()) + app = fakes.wsgi_app(fake_auth_context=self._get_fake_context()) + response = request.get_response(app) actual_image = json.loads(response.body) @@ -124,10 +118,10 @@ class ImagesTest(test.TestCase): expected_image = { "image": { - "id": 124, + "id": "124", "name": "queued snapshot", - "updated": self.NOW_API_FORMAT, - "created": self.NOW_API_FORMAT, + "updated": NOW_API_FORMAT, + "created": NOW_API_FORMAT, "status": "QUEUED", "progress": 0, 'server': { @@ -161,11 +155,12 @@ class ImagesTest(test.TestCase): def test_get_image_xml(self): request = webob.Request.blank('/v1.0/images/123') request.accept = "application/xml" - response = request.get_response(fakes.wsgi_app()) + app = fakes.wsgi_app(fake_auth_context=self._get_fake_context()) + response = request.get_response(app) actual_image = minidom.parseString(response.body.replace(" ", "")) - expected_now = self.NOW_API_FORMAT + expected_now = NOW_API_FORMAT expected_image = minidom.parseString(""" Date: Tue, 6 Sep 2011 15:19:37 -0400 Subject: Fixes an issue where 'invalid literal for int' would occur when listing images after making a v1.1 server snapshot (with a UUID). v1.1 image id's are now treated as strings (not integer ID's). The v1.0 API still tries to treat image id's as integers but doesn't fail miserably if they are uuid's either. This should pave the way for image ID's as uuids and more closely matches the v1.1 spec with regards to images and the server refs they contain. --- nova/api/openstack/common.py | 24 ++------- nova/api/openstack/views/images.py | 10 ++++ nova/tests/api/openstack/test_common.py | 40 ++++++++++---- nova/tests/api/openstack/test_images.py | 95 +++++++++++++++++++++++++-------- 4 files changed, 118 insertions(+), 51 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index d743a66ef..dba3ec8e9 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -185,30 +185,16 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): def get_id_from_href(href): - """Return the id portion of a url as an int. + """Return the id or uuid portion of a url. 
Given: 'http://www.foo.com/bar/123?q=4' - Returns: 123 + Returns: '123' - In order to support local hrefs, the href argument can be just an id: - Given: '123' - Returns: 123 + Given: 'http://www.foo.com/bar/abc123?q=4' + Returns: 'abc123' """ - LOG.debug(_("Attempting to treat %(href)s as an integer ID.") % locals()) - - try: - return int(href) - except ValueError: - pass - - LOG.debug(_("Attempting to treat %(href)s as a URL.") % locals()) - - try: - return int(urlparse.urlsplit(href).path.split('/')[-1]) - except ValueError as error: - LOG.debug(_("Failed to parse ID from %(href)s: %(error)s") % locals()) - raise + return urlparse.urlsplit("%s" % href).path.split('/')[-1] def remove_version_from_href(href): diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index 21f1b2d3e..20c99124b 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -70,6 +70,7 @@ class ViewBuilder(object): } self._build_server(image, image_obj) + self._build_image_id(image, image_obj) if detail: image.update({ @@ -95,6 +96,12 @@ class ViewBuilderV10(ViewBuilder): except (KeyError, ValueError): pass + def _build_image_id(self, image, image_obj): + try: + image['id'] = int(image_obj['id']) + except ValueError: + pass + class ViewBuilderV11(ViewBuilder): """OpenStack API v1.1 Image Builder""" @@ -118,6 +125,9 @@ class ViewBuilderV11(ViewBuilder): except KeyError: return + def _build_image_id(self, image, image_obj): + image['id'] = "%s" % image_obj['id'] + def generate_href(self, image_id): """Return an href string pointing to this object.""" return os.path.join(self.base_url, self.project_id, diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index b422bc4d1..f519ea72b 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -237,21 +237,41 @@ class MiscFunctionsTest(test.TestCase): common.remove_version_from_href, fixture) - def test_get_id_from_href(self): + def test_get_id_from_href_with_int_url(self): fixture = 'http://www.testsite.com/dir/45' actual = common.get_id_from_href(fixture) - expected = 45 + expected = '45' self.assertEqual(actual, expected) - def test_get_id_from_href_bad_request(self): - fixture = 'http://45' - self.assertRaises(ValueError, - common.get_id_from_href, - fixture) + def test_get_id_from_href_with_int(self): + fixture = '45' + actual = common.get_id_from_href(fixture) + expected = '45' + self.assertEqual(actual, expected) - def test_get_id_from_href_int(self): - fixture = 1 - self.assertEqual(fixture, common.get_id_from_href(fixture)) + def test_get_id_from_href_with_int_url_query(self): + fixture = 'http://www.testsite.com/dir/45?asdf=jkl' + actual = common.get_id_from_href(fixture) + expected = '45' + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid_url(self): + fixture = 'http://www.testsite.com/dir/abc123' + actual = common.get_id_from_href(fixture) + expected = "abc123" + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid_url_query(self): + fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl' + actual = common.get_id_from_href(fixture) + expected = "abc123" + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid(self): + fixture = 'abc123' + actual = common.get_id_from_href(fixture) + expected = 'abc123' + self.assertEqual(actual, expected) def test_get_version_from_href(self): fixture = 'http://www.testsite.com/v1.1/images' diff --git 
a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2a7cfc382..6448e9986 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -365,7 +365,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): {'id': 125, 'name': 'saving snapshot'}, {'id': 126, 'name': 'active snapshot'}, {'id': 127, 'name': 'killed snapshot'}, - {'id': 129, 'name': None}] + {'id': 128, 'name': 'active UUID snapshot'}, + {'id': 130, 'name': None}] self.assertDictListMatch(response_list, expected) @@ -403,14 +404,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): expected_image = { "image": { - "id": 124, + "id": '124', "name": "queued snapshot", "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, "status": "QUEUED", "progress": 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -458,7 +459,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image.toxml(), actual_image.toxml()) def test_get_image_xml_no_name(self): - request = webob.Request.blank('/v1.0/images/129') + request = webob.Request.blank('/v1.0/images/130') request.accept = "application/xml" response = request.get_response(fakes.wsgi_app()) @@ -466,7 +467,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): expected_now = self.NOW_API_FORMAT expected_image = minidom.parseString(""" - Date: Tue, 6 Sep 2011 14:26:02 -0500 Subject: Set flat_injected to False by default. --- nova/network/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index e6b30d1a0..6120137ce 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -74,7 +74,7 @@ flags.DEFINE_string('flat_network_bridge', None, 'Bridge for simple network instances') flags.DEFINE_string('flat_network_dns', '8.8.4.4', 'Dns for simple network') -flags.DEFINE_bool('flat_injected', True, +flags.DEFINE_bool('flat_injected', False, 'Whether to attempt to inject network setup into guest') flags.DEFINE_string('flat_interface', None, 'FlatDhcp will bridge into this interface if set') -- cgit From 27b052c1aa2b5ae191c2e2986788c152eef9c221 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Tue, 6 Sep 2011 14:36:58 -0500 Subject: novaclient v1_0 has an ipgroups argument, but novaclient v1_1 doesn't --- nova/scheduler/abstract_scheduler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 7f17b642f..be6267d3a 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -110,7 +110,6 @@ class AbstractScheduler(driver.Scheduler): flavor_id = instance_type['flavorid'] reservation_id = instance_properties['reservation_id'] files = kwargs['injected_files'] - ipgroup = None # Not supported in OS API ... 
yet
        child_zone = zone_info['child_zone']
        child_blob = zone_info['child_blob']
        zone = db.zone_get(context, child_zone)
@@ -124,8 +123,9 @@ class AbstractScheduler(driver.Scheduler):
         except novaclient_exceptions.BadRequest, e:
             raise exception.NotAuthorized(_("Bad credentials attempting "
                     "to talk to zone at %(url)s.") % locals())
-        nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
-                            child_blob, reservation_id=reservation_id)
+        nova.servers.create(name, image_ref, flavor_id,
+                            meta=meta, files=files, zone_blob=child_blob,
+                            reservation_id=reservation_id)
 
     def _provision_resource_from_blob(self, context, build_plan_item,
             instance_id, request_spec, kwargs):
-- cgit

From 1f3856ffb92ab690b1d630deb6fa025ae74348f7 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Tue, 6 Sep 2011 12:48:41 -0700
Subject: revert changes to display description

---
 nova/api/openstack/create_instance_helper.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 9b2928bc8..29e071609 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -161,10 +161,6 @@ class CreateInstanceHelper(object):
                         'config_drive': config_drive,
                         'password': password}
 
-        # NOTE(vish): This is solely for compatibility with anything
-        # that is using the display description field.
-        metadata = server_dict.get('metadata') or {}
-        display_description = metadata.get('description') or name
         return (extra_values,
                 create_method(context,
                               inst_type,
@@ -172,9 +168,9 @@ class CreateInstanceHelper(object):
                               kernel_id=kernel_id,
                               ramdisk_id=ramdisk_id,
                               display_name=name,
-                              display_description=display_description,
+                              display_description=name,
                               key_name=key_name,
-                              metadata=metadata,
+                              metadata=server_dict.get('metadata', {}),
                               access_ip_v4=server_dict.get('accessIPv4'),
                               access_ip_v6=server_dict.get('accessIPv6'),
                               injected_files=injected_files,
-- cgit

From 9b2885076d2ed438fb3189b8528f5bec6a2cda4d Mon Sep 17 00:00:00 2001
From: "Kevin L. Mitchell"
Date: Tue, 6 Sep 2011 15:02:04 -0500
Subject: Fix a misspelling of "exception"

---
 nova/network/manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/network/manager.py b/nova/network/manager.py
index e6b30d1a0..050cec250 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -448,7 +448,7 @@ class NetworkManager(manager.SchedulerDependentManager):
         try:
             fixed_ips = kwargs.get('fixed_ips') or \
                         self.db.fixed_ip_get_by_instance(context, instance_id)
-        except exceptions.FixedIpNotFoundForInstance:
+        except exception.FixedIpNotFoundForInstance:
             fixed_ips = []
         LOG.debug(_("network deallocation for instance |%s|"), instance_id,
                   context=context)
-- cgit

From 0f1db545f1a3c8b016cd7bcf21813019262b9b7a Mon Sep 17 00:00:00 2001
From: Brian Waldon
Date: Tue, 6 Sep 2011 16:28:34 -0400
Subject: reverting xenapi change

---
 nova/image/glance.py              | 1 -
 nova/image/s3.py                  | 1 -
 nova/tests/api/openstack/fakes.py | 1 -
 nova/tests/test_xenapi.py         | 2 +-
 4 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/nova/image/glance.py b/nova/image/glance.py
index b73a95276..25af8baa1 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -30,7 +30,6 @@
 from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import utils
-from nova.image import service
 
 LOG = logging.getLogger('nova.image.glance')
 
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 1e74dd433..343555887 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -34,7 +34,6 @@
 from nova import flags
 from nova import image
 from nova import log as logging
 from nova import utils
-from nova.image import service
 
 from nova.api.ec2 import ec2utils
 
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 9faeeaa8a..c1b5f884c 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -40,7 +40,6 @@
 from nova.api.openstack import limits
 from nova.auth.manager import User, Project
 import nova.image.fake
 from nova.image import glance
-from nova.image import service
 from nova.tests import fake_flags
 from nova.tests.glance import stubs as glance_stubs
 
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 27a8a979d..45dad3516 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -745,7 +745,7 @@ class XenAPIMigrateInstance(test.TestCase):
         fake_utils.stub_out_utils_execute(self.stubs)
         stubs.stub_out_migration_methods(self.stubs)
         stubs.stubout_get_this_vm_uuid(self.stubs)
-        #glance_stubs.stubout_glance_client(self.stubs)
+        glance_stubs.stubout_glance_client(self.stubs)
 
     def test_migrate_disk_and_power_off(self):
         instance = db.instance_create(self.context, self.values)
-- cgit

From 57aff93284bff196b37d0deb995ff46d4708bbda Mon Sep 17 00:00:00 2001
From: Brian Waldon
Date: Tue, 6 Sep 2011 17:38:48 -0400
Subject: reverting change to GlanceImageService._is_image_available

---
 nova/image/glance.py | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/nova/image/glance.py b/nova/image/glance.py
index 25af8baa1..804d27fbf 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -276,7 +276,23 @@ class GlanceImageService(object):
         an auth_token.
""" - return hasattr(context, 'auth_token') and context.auth_token + if hasattr(context, 'auth_token') and context.auth_token: + return True + + if image_meta['is_public'] or context.is_admin: + return True + + properties = image_meta['properties'] + + if context.project_id and ('project_id' in properties): + return str(properties['project_id']) == str(context.project_id) + + try: + user_id = properties['user_id'] + except KeyError: + return False + + return str(user_id) == str(context.user_id) # utility functions -- cgit From 41f3d157c917255683ae23313704f357e061911c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 6 Sep 2011 16:41:35 -0500 Subject: Fixed unit test. --- nova/tests/test_xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 45dad3516..91b4161b0 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -494,6 +494,7 @@ class XenAPIVMTestCase(test.TestCase): self.check_vm_params_for_linux_with_external_kernel() def test_spawn_netinject_file(self): + self.flags(flat_injected=True) db_fakes.stub_out_db_instance_api(self.stubs, injected=True) self._tee_executed = False @@ -611,7 +612,6 @@ class XenAPIVMTestCase(test.TestCase): str(3 * 1024)) def test_rescue(self): - self.flags(flat_injected=False) instance = self._create_instance() conn = xenapi_conn.get_connection(False) conn.rescue(self.context, instance, None, []) -- cgit From 53357516226a1c00217c742eb512b7efc0f574b2 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Tue, 6 Sep 2011 18:01:57 -0700 Subject: Added use_single_default_gateway to switch from multiple default gateways to single default gateway --- nova/network/linux_net.py | 72 ++++++++++++++++++++------------------------ nova/tests/test_linux_net.py | 17 +++-------- 2 files changed, 38 insertions(+), 51 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 48438760a..affdde701 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -68,6 +68,9 @@ flags.DEFINE_string('linuxnet_interface_driver', 'Driver used to create ethernet devices.') flags.DEFINE_string('linuxnet_ovs_integration_bridge', 'br-int', 'Name of Open vSwitch bridge used with linuxnet') +flags.DEFINE_bool('use_single_default_gateway', + False, 'Use single default gateway. 
Only first nic of vm' + ' will get default gateway from dhcp server') binary_name = os.path.basename(inspect.stack()[-1][1]) @@ -513,35 +516,27 @@ def get_dhcp_hosts(context, network_ref): def get_dhcp_opts(context, network_ref): """Get network's hosts config in dhcp-opts format.""" - # create a decision dictionary for default gateway for eache instance hosts = [] - default_gateway_network_node = dict() - network_id = network_ref['id'] - instance_set = set() - ips_ref = db.network_get_associated_fixed_ips(context, network_id) - - for fixed_ip_ref in ips_ref: - instance_set.add(fixed_ip_ref['instance_id']) - - for instance_id in instance_set: - # nic number is decided by xxx from this function in xxx function - vifs = db.virtual_interface_get_by_instance(context, instance_id) - if vifs == None: - default_gateway_network_node[instance_id] = None - continue - # offer a default gateway to the first virtual interface of instance - first_vif = vifs[0] - default_gateway_network_node[instance_id] = first_vif['network_id'] - - for fixed_ip_ref in ips_ref: - instance_id = fixed_ip_ref['instance_id'] - if instance_id in default_gateway_network_node: - target_network_id = default_gateway_network_node[instance_id] - if target_network_id == fixed_ip_ref['network_id']: - hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=True)) - else: - hosts.append(_host_dhcp_opts(fixed_ip_ref, gw=None)) - + ips_ref = db.network_get_associated_fixed_ips(context, network_ref['id']) + + if ips_ref: + #set of instance ids + instance_set = set([fixed_ip_ref['instance_id'] + for fixed_ip_ref in ips_ref]) + default_gw_network_node = {} + for instance_id in instance_set: + vifs = db.virtual_interface_get_by_instance(context, instance_id) + if vifs: + #offer a default gateway to the first virtual interface + default_gw_network_node[instance_id] = vifs[0]['network_id'] + + for fixed_ip_ref in ips_ref: + instance_id = fixed_ip_ref['instance_id'] + if instance_id in default_gw_network_node: + target_network_id = default_gw_network_node[instance_id] + # we don't want default gateway for this fixed ip + if target_network_id != fixed_ip_ref['network_id']: + hosts.append(_host_dhcp_opts(fixed_ip_ref)) return '\n'.join(hosts) @@ -560,13 +555,14 @@ def update_dhcp(context, dev, network_ref): with open(conffile, 'w') as f: f.write(get_dhcp_hosts(context, network_ref)) - optsfile = _dhcp_file(dev, 'opts') - with open(optsfile, 'w') as f: - f.write(get_dhcp_opts(context, network_ref)) + if FLAGS.use_single_default_gateway: + optsfile = _dhcp_file(dev, 'opts') + with open(optsfile, 'w') as f: + f.write(get_dhcp_opts(context, network_ref)) + os.chmod(optsfile, 0644) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) - os.chmod(optsfile, 0644) pid = _dnsmasq_pid_for(dev) @@ -598,11 +594,13 @@ def update_dhcp(context, dev, network_ref): '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])), '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, - '--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts'), '--leasefile-ro'] if FLAGS.dns_server: cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server] + if FLAGS.use_single_default_gateway: + cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')] + _execute(*cmd, run_as_root=True) @@ -681,13 +679,9 @@ def _host_dhcp(fixed_ip_ref): "net:" + _host_dhcp_network(fixed_ip_ref)) -def _host_dhcp_opts(fixed_ip_ref, gw): +def _host_dhcp_opts(fixed_ip_ref): """Return a host string for an address in dhcp-host format.""" - if not gw: - 
return '%s,%s' % (_host_dhcp_network(fixed_ip_ref), - 3) - else: - return '' + return '%s,%s' % (_host_dhcp_network(fixed_ip_ref), 3) def _execute(*cmd, **kwargs): diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py index be6faa07b..4c72f1e0f 100755 --- a/nova/tests/test_linux_net.py +++ b/nova/tests/test_linux_net.py @@ -23,6 +23,7 @@ from nova import log as logging from nova import test from nova import utils from nova.network import manager as network_manager +from nova.network import linux_net import mox @@ -194,6 +195,7 @@ class LinuxNetworkTestCase(test.TestCase): def setUp(self): super(LinuxNetworkTestCase, self).setUp() network_driver = FLAGS.network_driver + self.flags(use_single_default_gateway=True) self.driver = utils.import_object(network_driver) self.driver.db = db @@ -300,9 +302,7 @@ class LinuxNetworkTestCase(test.TestCase): vifs[5]]) self.mox.ReplayAll() - expected_opts = '\n'\ - 'NW-i00000001-0,3\n'\ - '' + expected_opts = 'NW-i00000001-0,3' actual_opts = self.driver.get_dhcp_opts(None, networks[0]) self.assertEquals(actual_opts, expected_opts) @@ -328,19 +328,12 @@ class LinuxNetworkTestCase(test.TestCase): vifs[5]]) self.mox.ReplayAll() - expected_opts = 'NW-i00000000-1,3\n'\ - '\n'\ - '' + expected_opts = "NW-i00000000-1,3" actual_opts = self.driver.get_dhcp_opts(None, networks[1]) self.assertEquals(actual_opts, expected_opts) - def test_dhcp_opts_default_gateway_network(self): - expected = "" - actual = self.driver._host_dhcp_opts(fixed_ips[0], True) - self.assertEquals(actual, expected) - def test_dhcp_opts_not_default_gateway_network(self): expected = "NW-i00000000-0,3" - actual = self.driver._host_dhcp_opts(fixed_ips[0], False) + actual = self.driver._host_dhcp_opts(fixed_ips[0]) self.assertEquals(actual, expected) -- cgit From c1763deb23dc6dcf7ca4f32aafde47501a87083f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 7 Sep 2011 11:11:19 -0500 Subject: updated floating_ip generation --- nova/tests/fake_network.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 99b027cf3..19be83bc9 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+from itertools import islice from nova import db from nova import flags @@ -68,8 +69,8 @@ def fixed_ips(num_networks, num_ips, num_floating_ips=0): for network_index in xrange(num_networks): for ip_index in xrange(num_ips): fixed_ip_id = network_index * num_ips + ip_index - f_ips = [FakeModel(**floating_ips(fixed_ip_id).next()) - for i in xrange(num_floating_ips)] + f_ips = [FakeModel(**floating_ip_dict) for floating_ip_dict + in islice(floating_ips(fixed_ip_id), num_floating_ips)] yield {'id': fixed_ip_id, 'network_id': network_index, 'address': '192.168.%d.1%02d' % (network_index, ip_index), @@ -94,7 +95,7 @@ flavor = {'id': 0, def floating_ips(fixed_ip_id): for i in xrange(154): yield {'id': i, - 'address': '10.10.10.%d' % (i + 100), + 'address': '10.10.%d.%d' % (fixed_ip_id, i + 100), 'fixed_ip_id': fixed_ip_id, 'project_id': None, 'auto_assigned': False} -- cgit From e3cb2c82224fad59c16010c0842ebcfa1ac0dc95 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 7 Sep 2011 12:41:16 -0500 Subject: changed the fixed_ip_generator --- nova/tests/fake_network.py | 110 +++++++++++++++++++++++++++++---------------- 1 file changed, 71 insertions(+), 39 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 19be83bc9..ee1a4a7c7 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -14,7 +14,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from itertools import islice from nova import db from nova import flags @@ -35,6 +34,17 @@ class FakeModel(dict): return self[name] +flavor = {'id': 0, + 'name': 'fake_flavor', + 'memory_mb': 2048, + 'vcpus': 2, + 'local_gb': 10, + 'flavor_id': 0, + 'swap': 0, + 'rxtx_quota': 0, + 'rxtx_cap': 3} + + def fake_network(network_id, ipv6=None): if ipv6 is None: ipv6 = FLAGS.use_ipv6 @@ -65,42 +75,6 @@ def fake_network(network_id, ipv6=None): return fake_network -def fixed_ips(num_networks, num_ips, num_floating_ips=0): - for network_index in xrange(num_networks): - for ip_index in xrange(num_ips): - fixed_ip_id = network_index * num_ips + ip_index - f_ips = [FakeModel(**floating_ip_dict) for floating_ip_dict - in islice(floating_ips(fixed_ip_id), num_floating_ips)] - yield {'id': fixed_ip_id, - 'network_id': network_index, - 'address': '192.168.%d.1%02d' % (network_index, ip_index), - 'instance_id': 0, - 'allocated': False, - # and since network_id and vif_id happen to be equivalent - 'virtual_interface_id': network_index, - 'floating_ips': f_ips} - - -flavor = {'id': 0, - 'name': 'fake_flavor', - 'memory_mb': 2048, - 'vcpus': 2, - 'local_gb': 10, - 'flavor_id': 0, - 'swap': 0, - 'rxtx_quota': 0, - 'rxtx_cap': 3} - - -def floating_ips(fixed_ip_id): - for i in xrange(154): - yield {'id': i, - 'address': '10.10.%d.%d' % (fixed_ip_id, i + 100), - 'fixed_ip_id': fixed_ip_id, - 'project_id': None, - 'auto_assigned': False} - - def vifs(n): for x in xrange(n): yield {'id': x, @@ -111,6 +85,58 @@ def vifs(n): 'instance_id': 0} +#def fixed_ips(num_networks, num_ips, num_floating_ips=0): +# for network_index in xrange(num_networks): +# for ip_index in xrange(num_ips): +# fixed_ip_id = network_index * num_ips + ip_index +# islice = itertools.islice +# yield {'id': fixed_ip_id, +# 'network_id': network_index, +# 'address': '192.168.%d.1%02d' % (network_index, ip_index), +# 'instance_id': 0, +# 'allocated': False, +# # and since network_id and vif_id happen to be equivalent +# 
'virtual_interface_id': network_index, +# 'floating_ips': f_ips} + + +def floating_ip_ids(): + for i in xrange(99): + yield i + + +def fixed_ip_ids(): + for i in xrange(99): + yield i + + +floating_ip_id = floating_ip_ids() +fixed_ip_id = fixed_ip_ids() + + +def next_fixed_ip(network_id, num_floating_ips=0): + next_id = fixed_ip_id.next() + f_ips = [FakeModel(**next_floating_ip(fixed_ip_id)) + for i in xrange(num_floating_ips)] + return {'id': next, + 'network_id': network_id, + 'address': '192.168.%d.1%02d' % (network_id, next_id), + 'instance_id': 0, + 'allocated': False, + # and since network_id and vif_id happen to be equivalent + 'virtual_interface_id': network_id, + 'floating_ips': f_ips} + + +def next_floating_ip(fixed_ip_id): + next_id = floating_ip_id.next() + return {'id': next_id, + 'address': '10.10.10.1%02d' % next_id, + 'fixed_ip_id': fixed_ip_id, + 'project_id': None, + 'auto_assigned': False} + + def ipv4_like(ip, match_string): ip = ip.split('.') match_octets = match_string.split('.') @@ -123,15 +149,21 @@ def ipv4_like(ip, match_string): return True -def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2): +def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2, + floating_ips_per_fixed_ip=0): # stubs is the self.stubs from the test # ips_per_vif is the number of ips each vif will have # num_floating_ips is number of float ips for each fixed ip network = network_manager.FlatManager(host=HOST) network.db = db + # reset the fixed and floating ip generators + floating_ip_id = floating_ip_ids() + fixed_ip_id = fixed_ip_ids() + def fixed_ips_fake(*args, **kwargs): - return list(fixed_ips(num_networks, ips_per_vif)) + return [next_fixed_ip(i, floating_ips_per_fixed_ip) + for i in xrange(num_networks) for j in xrange(ips_per_vif)] def virtual_interfaces_fake(*args, **kwargs): return [vif for vif in vifs(num_networks)] -- cgit From 262b5cf6e8bd577d2b08fb92e6da56f8bcdecd57 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Wed, 7 Sep 2011 13:37:29 -0500 Subject: weigh_hosts() needs to return a list of hosts for the instances, not just a list of hosts --- nova/scheduler/base_scheduler.py | 16 +++++++++++-- nova/tests/scheduler/test_abstract_scheduler.py | 32 +++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py index 35e5af035..e9c078b81 100644 --- a/nova/scheduler/base_scheduler.py +++ b/nova/scheduler/base_scheduler.py @@ -55,5 +55,17 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler): scheduling objectives """ # NOTE(sirp): The default logic is the same as the NoopCostFunction - return [dict(weight=1, hostname=hostname, capabilities=capabilities) - for hostname, capabilities in hosts] + hosts = [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + # NOTE(Vek): What we actually need to return is enough hosts + # for all the instances! 
+ num_instances = request_spec.get('num_instances', 1) + instances = [] + while num_instances > len(hosts): + instances.extend(hosts) + num_instances -= len(hosts) + if num_instances > 0: + instances.extend(hosts[:num_instances]) + + return instances diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py index aa97e2344..f47699048 100644 --- a/nova/tests/scheduler/test_abstract_scheduler.py +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -65,6 +65,11 @@ class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler): pass +class FakeBaseScheduler(base_scheduler.BaseScheduler): + # No need to stub anything at the moment + pass + + class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { @@ -365,3 +370,30 @@ class AbstractSchedulerTestCase(test.TestCase): self.assertEqual(fixture._decrypt_blob(test_data), json.dumps(test_data)) + + +class BaseSchedulerTestCase(test.TestCase): + """Test case for Base Scheduler.""" + + def test_weigh_hosts(self): + """ + Try to weigh a short list of hosts and make sure enough + entries for a larger number instances are returned. + """ + + sched = FakeBaseScheduler() + + # Fake out a list of hosts + zm = FakeZoneManager() + hostlist = [(host, services['compute']) + for host, services in zm.service_states + if 'compute' in services] + + # Call weigh_hosts() + num_instances = len(hostlist) * 2 + len(hostlist) / 2 + instlist = sched.weigh_hosts('compute', + dict(num_instances=num_instances), + hostlist) + + # Should be enough entries to cover all instances + self.assertEqual(len(instlist), num_instances) -- cgit From 34baac0f11ff2084caa46a533aad411988e1541e Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 7 Sep 2011 11:41:33 -0700 Subject: Fix for LP Bug #837867 --- nova/network/manager.py | 6 ++--- nova/tests/test_network.py | 58 ++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 59 insertions(+), 5 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index 050cec250..d1791f777 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -128,8 +128,8 @@ class RPCAllocateFixedIP(object): """Calls allocate_fixed_ip once for each network.""" green_pool = greenpool.GreenPool() - vpn = kwargs.pop('vpn') - requested_networks = kwargs.pop('requested_networks') + vpn = kwargs.get('vpn') + requested_networks = kwargs.get('requested_networks') for network in networks: address = None @@ -890,7 +890,7 @@ class FlatManager(NetworkManager): def _allocate_fixed_ips(self, context, instance_id, host, networks, **kwargs): """Calls allocate_fixed_ip once for each network.""" - requested_networks = kwargs.pop('requested_networks') + requested_networks = kwargs.get('requested_networks') for network in networks: address = None if requested_networks is not None: diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 25ff940f0..05fca7bc5 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -58,7 +58,7 @@ networks = [{'id': 0, 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, - 'host': None, + 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.0.2'}, {'id': 1, @@ -78,7 +78,7 @@ networks = [{'id': 0, 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', 'vlan': None, - 'host': None, + 'host': HOST, 'project_id': 'fake_project', 'vpn_public_address': '192.168.1.2'}] @@ -247,6 +247,34 @@ class FlatNetworkTestCase(test.TestCase): self.network.validate_networks(None, 
requested_networks) + def test_add_fixed_ip_instance_without_vpn_requested_networks(self): + self.mox.StubOutWithMock(db, 'network_get') + self.mox.StubOutWithMock(db, 'network_update') + self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') + self.mox.StubOutWithMock(db, 'instance_get') + self.mox.StubOutWithMock(db, + 'virtual_interface_get_by_instance_and_network') + self.mox.StubOutWithMock(db, 'fixed_ip_update') + + db.fixed_ip_update(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), + mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) + + db.instance_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn({'security_groups': + [{'id': 0}]}) + db.fixed_ip_associate_pool(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('192.168.0.101') + db.network_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(networks[0]) + db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) + self.mox.ReplayAll() + self.network.add_fixed_ip_to_instance(self.context, 1, HOST, + networks[0]['id']) + class VlanNetworkTestCase(test.TestCase): def setUp(self): @@ -387,6 +415,32 @@ class VlanNetworkTestCase(test.TestCase): mox.IgnoreArg(), mox.IgnoreArg()) + def test_add_fixed_ip_instance_without_vpn_requested_networks(self): + self.mox.StubOutWithMock(db, 'network_get') + self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') + self.mox.StubOutWithMock(db, 'instance_get') + self.mox.StubOutWithMock(db, + 'virtual_interface_get_by_instance_and_network') + self.mox.StubOutWithMock(db, 'fixed_ip_update') + + db.fixed_ip_update(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), + mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) + + db.instance_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn({'security_groups': + [{'id': 0}]}) + db.fixed_ip_associate_pool(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('192.168.0.101') + db.network_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(networks[0]) + self.mox.ReplayAll() + self.network.add_fixed_ip_to_instance(self.context, 1, HOST, + networks[0]['id']) + class CommonNetworkTestCase(test.TestCase): -- cgit From 3e61268a350ba0ec43d28dfbac6e82503a174dba Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 7 Sep 2011 14:33:43 -0500 Subject: had used wrong variable --- nova/tests/fake_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index ee1a4a7c7..76e1b991d 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -116,7 +116,7 @@ fixed_ip_id = fixed_ip_ids() def next_fixed_ip(network_id, num_floating_ips=0): next_id = fixed_ip_id.next() - f_ips = [FakeModel(**next_floating_ip(fixed_ip_id)) + f_ips = [FakeModel(**next_floating_ip(next_id)) for i in xrange(num_floating_ips)] return {'id': next, 'network_id': network_id, -- cgit From 31ae07f06c71968deb67c5aa1c111fe9e14fb5d8 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 7 Sep 2011 14:36:22 -0500 Subject: forgot _id --- nova/tests/fake_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 76e1b991d..97231bfa1 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -118,7 +118,7 @@ def next_fixed_ip(network_id, num_floating_ips=0): next_id = fixed_ip_id.next() f_ips = [FakeModel(**next_floating_ip(next_id)) for i in 
xrange(num_floating_ips)] - return {'id': next, + return {'id': next_id, 'network_id': network_id, 'address': '192.168.%d.1%02d' % (network_id, next_id), 'instance_id': 0, -- cgit From a158d5721c18902b290a42bb3874b34e54dbcd7b Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 7 Sep 2011 13:10:05 -0700 Subject: exclude net tag from host_dhcp if use_single_default_gateway flag is set to false --- nova/network/linux_net.py | 17 ++++++++++++----- nova/tests/test_linux_net.py | 6 ++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index affdde701..7d89b2bcc 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -672,11 +672,18 @@ def _host_dhcp_network(fixed_ip_ref): def _host_dhcp(fixed_ip_ref): """Return a host string for an address in dhcp-host format.""" instance_ref = fixed_ip_ref['instance'] - return '%s,%s.%s,%s,%s' % (fixed_ip_ref['virtual_interface']['address'], - instance_ref['hostname'], - FLAGS.dhcp_domain, - fixed_ip_ref['address'], - "net:" + _host_dhcp_network(fixed_ip_ref)) + vif = fixed_ip_ref['virtual_interface'] + if FLAGS.use_single_default_gateway: + return '%s,%s.%s,%s,%s' % (vif['address'], + instance_ref['hostname'], + FLAGS.dhcp_domain, + fixed_ip_ref['address'], + "net:" + _host_dhcp_network(fixed_ip_ref)) + else: + return '%s,%s.%s,%s' % (vif['address'], + instance_ref['hostname'], + FLAGS.dhcp_domain, + fixed_ip_ref['address']) def _host_dhcp_opts(fixed_ip_ref): diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py index 4c72f1e0f..6d0a2b6bd 100755 --- a/nova/tests/test_linux_net.py +++ b/nova/tests/test_linux_net.py @@ -337,3 +337,9 @@ class LinuxNetworkTestCase(test.TestCase): expected = "NW-i00000000-0,3" actual = self.driver._host_dhcp_opts(fixed_ips[0]) self.assertEquals(actual, expected) + + def test_host_dhcp_without_default_gateway_network(self): + self.flags(use_single_default_gateway=False) + expected = ("10.0.0.1,fake_instance00.novalocal,192.168.0.100") + actual = self.driver._host_dhcp(fixed_ips[0]) + self.assertEquals(actual, expected) -- cgit From 4d98408d4ec605ad9a591d5166f2b8ea6e723ecb Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 7 Sep 2011 13:37:58 -0700 Subject: modified unit tests, set use_single_default_gateway flag to True whereever needed instead of setting it in the init method --- nova/tests/test_linux_net.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py index 6d0a2b6bd..99577b88e 100755 --- a/nova/tests/test_linux_net.py +++ b/nova/tests/test_linux_net.py @@ -195,11 +195,11 @@ class LinuxNetworkTestCase(test.TestCase): def setUp(self): super(LinuxNetworkTestCase, self).setUp() network_driver = FLAGS.network_driver - self.flags(use_single_default_gateway=True) self.driver = utils.import_object(network_driver) self.driver.db = db def test_update_dhcp_for_nw00(self): + self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') @@ -223,6 +223,7 @@ class LinuxNetworkTestCase(test.TestCase): self.driver.update_dhcp(None, "eth0", networks[0]) def test_update_dhcp_for_nw01(self): + self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') @@ -246,6 +247,7 @@ class LinuxNetworkTestCase(test.TestCase): 
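For reference, the two dhcp-host line shapes that _host_dhcp can now emit, per the format strings in the linux_net.py hunk above. The values are taken from the expectations in this test file; the tagged form is an inference from the format string, not a quoted test result:

    vif_address = '10.0.0.1'
    hostname = 'fake_instance00'
    dhcp_domain = 'novalocal'
    address = '192.168.0.100'
    net_tag = 'NW-i00000000-0'

    # use_single_default_gateway=True appends the per-network tag:
    tagged = '%s,%s.%s,%s,%s' % (vif_address, hostname, dhcp_domain,
                                 address, 'net:' + net_tag)
    # use_single_default_gateway=False drops it:
    plain = '%s,%s.%s,%s' % (vif_address, hostname, dhcp_domain, address)
    # plain -> '10.0.0.1,fake_instance00.novalocal,192.168.0.100'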
self.driver.update_dhcp(None, "eth0", networks[0]) def test_get_dhcp_hosts_for_nw00(self): + self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') db.network_get_associated_fixed_ips(mox.IgnoreArg(), @@ -264,6 +266,7 @@ class LinuxNetworkTestCase(test.TestCase): self.assertEquals(actual_hosts, expected) def test_get_dhcp_hosts_for_nw01(self): + self.flags(use_single_default_gateway=True) self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips') db.network_get_associated_fixed_ips(mox.IgnoreArg(), @@ -339,7 +342,6 @@ class LinuxNetworkTestCase(test.TestCase): self.assertEquals(actual, expected) def test_host_dhcp_without_default_gateway_network(self): - self.flags(use_single_default_gateway=False) expected = ("10.0.0.1,fake_instance00.novalocal,192.168.0.100") actual = self.driver._host_dhcp(fixed_ips[0]) self.assertEquals(actual, expected) -- cgit From 591997a76a8395a72c7316207983e1225c9c4a62 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Wed, 7 Sep 2011 20:40:48 +0000 Subject: fix a couple of typos in the added unit test --- nova/tests/scheduler/test_abstract_scheduler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py index f47699048..9bf128b13 100644 --- a/nova/tests/scheduler/test_abstract_scheduler.py +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -26,6 +26,7 @@ from nova import test from nova.compute import api as compute_api from nova.scheduler import driver from nova.scheduler import abstract_scheduler +from nova.scheduler import base_scheduler from nova.scheduler import zone_manager @@ -386,7 +387,7 @@ class BaseSchedulerTestCase(test.TestCase): # Fake out a list of hosts zm = FakeZoneManager() hostlist = [(host, services['compute']) - for host, services in zm.service_states + for host, services in zm.service_states.items() if 'compute' in services] # Call weigh_hosts() -- cgit From 3334fabe55c862531e3ced21b211710857c1e087 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 7 Sep 2011 15:48:31 -0500 Subject: removed vestige --- nova/tests/fake_network.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 97231bfa1..5e536a6ee 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -85,21 +85,6 @@ def vifs(n): 'instance_id': 0} -#def fixed_ips(num_networks, num_ips, num_floating_ips=0): -# for network_index in xrange(num_networks): -# for ip_index in xrange(num_ips): -# fixed_ip_id = network_index * num_ips + ip_index -# islice = itertools.islice -# yield {'id': fixed_ip_id, -# 'network_id': network_index, -# 'address': '192.168.%d.1%02d' % (network_index, ip_index), -# 'instance_id': 0, -# 'allocated': False, -# # and since network_id and vif_id happen to be equivalent -# 'virtual_interface_id': network_index, -# 'floating_ips': f_ips} - - def floating_ip_ids(): for i in xrange(99): yield i -- cgit From 2bed69e61aefdc8e2aa7eeb31fe9f338e912a01d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 7 Sep 2011 16:06:35 -0500 Subject: properly handle the id resetters --- nova/tests/fake_network.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 5e536a6ee..1ecb99b31 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -143,6 +143,7 @@ def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2, 
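The one-line fix below declares the id generators global before reassigning them; without that, the assignment only binds local names and the module-level streams are never reset between tests. A minimal sketch of the difference, with illustrative names:

    counter = iter(xrange(100))

    def reset_counter_wrong():
        counter = iter(xrange(100))   # binds a new local; module state unchanged

    def reset_counter_right():
        global counter
        counter = iter(xrange(100))   # rebinds the module-level name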
network.db = db # reset the fixed and floating ip generators + global floating_ip_id, fixed_ip_id floating_ip_id = floating_ip_ids() fixed_ip_id = fixed_ip_ids() -- cgit From d2b9299408f07f995fffc8b8559f52ee6adeeaad Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Wed, 7 Sep 2011 18:01:52 -0500 Subject: spread-first strategy --- nova/scheduler/abstract_scheduler.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 7f17b642f..a81fa53cd 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -20,8 +20,9 @@ customize the behavior: filter_hosts() and weigh_hosts(). The default behavior is to simply select all hosts and weight them the same. """ -import operator import json +import operator +import random import M2Crypto @@ -40,6 +41,8 @@ from nova.scheduler import api from nova.scheduler import driver FLAGS = flags.FLAGS +flags.DEFINE_boolean('spread_first', False, + 'Use a spread-first zone scheduler strategy') LOG = logging.getLogger('nova.scheduler.abstract_scheduler') -- cgit From ad25b7aa2ad744607b20432d635f70cc645cc6f6 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Wed, 7 Sep 2011 18:08:39 -0500 Subject: actually shuffle the weighted_hosts list... --- nova/scheduler/abstract_scheduler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index a81fa53cd..b4c2bf4f1 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -295,6 +295,8 @@ class AbstractScheduler(driver.Scheduler): "child_zone": child_zone, "child_blob": weighting["blob"]} weighted_hosts.append(host_dict) + if FLAGS.spread_first: + random.shuffle(weighted_hosts) weighted_hosts.sort(key=operator.itemgetter('weight')) return weighted_hosts -- cgit From b6d454762d7fdf9b202d8a580dd9bfdf069a5e80 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 7 Sep 2011 16:38:41 -0700 Subject: fix for lp844364: improve check for fixed_ip association --- nova/api/openstack/contrib/floating_ips.py | 4 +- .../api/openstack/contrib/test_floating_ips.py | 78 +++++++++++++++++++++- 2 files changed, 77 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index d1add8f83..9ceb5858f 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -107,7 +107,7 @@ class FloatingIPController(object): context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) - if 'fixed_ip' in floating_ip: + if floating_ip['fixed_ip']: self.network_api.disassociate_floating_ip(context, floating_ip['address']) @@ -161,7 +161,7 @@ class Floating_ips(extensions.ExtensionDescriptor): raise webob.exc.HTTPBadRequest(explanation=msg) floating_ip = self.network_api.get_floating_ip_by_ip(context, address) - if 'fixed_ip' in floating_ip: + if floating_ip['fixed_ip']: self.network_api.disassociate_floating_ip(context, address) return webob.Response(status_int=202) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 642f2b841..0744f0a11 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -212,11 +212,45 @@ class FloatingIpTest(test.TestCase): "fixed_ip": None} self.assertEqual(ip, expected) - def 
test_floating_ip_release(self): + def test_floating_ip_release_associated(self): + self.disassociated = False + + def get_floating_ip(ignore, context, id): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': {'id': 1}} + + def disassociate(ignore, context, floating_address): + self.disassociated = True + + self.stubs.Set(network.api.API, "get_floating_ip", + get_floating_ip) + self.stubs.Set(network.api.API, "disassociate_floating_ip", + disassociate) + req = webob.Request.blank('/v1.1/123/os-floating-ips/1') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertTrue(self.disassociated) + + def test_floating_ip_release_disassociated(self): + self.disassociated = False + + def fake_get_floating_ip(ignore, context, id): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': None} + + def fake_disassociate(ignore, context, floating_address): + self.disassociated = True + + self.stubs.Set(network.api.API, "get_floating_ip", + fake_get_floating_ip) + self.stubs.Set(network.api.API, "disassociate_floating_ip", + fake_disassociate) req = webob.Request.blank('/v1.1/123/os-floating-ips/1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) + self.assertFalse(self.disassociated) def test_add_floating_ip_to_instance(self): self.stubs.Set(network.api.API, "associate_floating_ip", @@ -289,8 +323,45 @@ class FloatingIpTest(test.TestCase): self.assertEqual(resp.status_int, 202) self.assertTrue(self.disassociated) - def test_remove_floating_ip_from_instance(self): - body = dict(removeFloatingIp=dict(address='11.0.0.1')) + def test_remove_associated_floating_ip_from_instance(self): + self.disassociated = False + + def fake_get_floating_ip_by_ip(ignore, context, ip): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': {'id': 1}} + + def fake_disassociate(ignore, context, floating_address): + self.disassociated = True + + self.stubs.Set(network.api.API, "get_floating_ip_by_ip", + fake_get_floating_ip_by_ip) + self.stubs.Set(network.api.API, "disassociate_floating_ip", + fake_disassociate) + body = dict(removeFloatingIp=dict(address='10.10.10.10')) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) + self.assertTrue(self.disassociated) + + def test_remove_disassociated_floating_ip_from_instance(self): + self.disassociated = False + + def fake_get_floating_ip_by_ip(ignore, context, ip): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': None} + + def fake_disassociate(ignore, context, floating_address): + self.disassociated = True + + self.stubs.Set(network.api.API, "get_floating_ip_by_ip", + fake_get_floating_ip_by_ip) + self.stubs.Set(network.api.API, "disassociate_floating_ip", + fake_disassociate) + body = dict(removeFloatingIp=dict(address='10.10.10.10')) req = webob.Request.blank('/v1.1/123/servers/test_inst/action') req.method = "POST" req.body = json.dumps(body) @@ -298,6 +369,7 @@ class FloatingIpTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 202) + self.assertFalse(self.disassociated) def test_bad_address_param_in_remove_floating_ip(self): body = dict(removeFloatingIp=dict(badparam='11.0.0.1')) -- cgit From b3bc07d4dcb43b6e070a136eb5532def6b623e6e Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 7 Sep 
2011 20:40:47 -0700 Subject: remove the short circuit in abstract scheduler when no local hosts are available --- nova/scheduler/abstract_scheduler.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 7f17b642f..cb8db599f 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -269,9 +269,6 @@ class AbstractScheduler(driver.Scheduler): # Filter local hosts based on requirements ... filtered_hosts = self.filter_hosts(topic, request_spec, unfiltered_hosts) - if not filtered_hosts: - LOG.warn(_("No hosts available")) - return [] # weigh the selected hosts. # weighted_hosts = [{weight=weight, hostname=hostname, -- cgit From 5426687825cd64adf0524de2808eed1cca15f521 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 7 Sep 2011 20:49:42 -0700 Subject: added test to cover case where no local hosts are available but child hosts are --- nova/tests/scheduler/test_abstract_scheduler.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/nova/tests/scheduler/test_abstract_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py index aa97e2344..a65d15e43 100644 --- a/nova/tests/scheduler/test_abstract_scheduler.py +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -365,3 +365,25 @@ class AbstractSchedulerTestCase(test.TestCase): self.assertEqual(fixture._decrypt_blob(test_data), json.dumps(test_data)) + + def test_empty_local_hosts(self): + """ + Create a nested set of FakeZones, try to build multiple instances + and ensure that a select call returns the appropriate build plan. + """ + sched = FakeAbstractScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) + + zm = FakeZoneManager() + # patch this to have no local hosts + zm.service_states = {} + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, + {'instance_type': {'memory_mb': 512}, + 'num_instances': 4}) + + # 0 from local zones, 12 from remotes + self.assertEqual(12, len(build_plan)) -- cgit From b13ca667bdd3303bbfcd4e58cc6d773cea09661d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 7 Sep 2011 21:48:11 -0700 Subject: catch exceptions from novaclient when talking to child zones. store them and re-raise if no other child zones return any results. If no exceptions are raised but no results are returned, raise a NotFound exception. --- nova/scheduler/api.py | 48 +++++++++++++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 55cea5f8f..a5124678d 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -103,22 +103,6 @@ def update_service_capabilities(context, service_name, host, capabilities): return rpc.fanout_cast(context, 'scheduler', kwargs) -def _wrap_method(function, self): - """Wrap method to supply self.""" - def _wrap(*args, **kwargs): - return function(self, *args, **kwargs) - return _wrap - - -def _process(func, zone): - """Worker stub for green thread pool. 
Give the worker - an authenticated nova client and zone info.""" - nova = novaclient.Client(zone.username, zone.password, None, - zone.api_url) - nova.authenticate() - return func(nova, zone) - - def call_zone_method(context, method_name, errors_to_ignore=None, novaclient_collection_name='zones', zones=None, *args, **kwargs): @@ -166,6 +150,28 @@ def child_zone_helper(zone_list, func): For example, if you are calling server.pause(), the list will be whatever the response from server.pause() is. One entry per child zone called.""" + + def _wrap_method(function, arg1): + """Wrap method to supply an argument.""" + def _wrap(*args, **kwargs): + return function(arg1, *args, **kwargs) + return _wrap + + def _process(func, zone): + """Worker stub for green thread pool. Give the worker + an authenticated nova client and zone info.""" + try: + nova = novaclient.Client(zone.username, zone.password, None, + zone.api_url) + nova.authenticate() + except novaclient_exceptions.BadRequest, e: + url = zone.api_url + LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") + % locals()) + return e + else: + return func(nova, zone) + green_pool = greenpool.GreenPool() return [result for result in green_pool.imap( _wrap_method(_process, func), zone_list)] @@ -260,6 +266,8 @@ class reroute_compute(object): if not FLAGS.enable_zone_routing: raise exception.InstanceNotFound(instance_id=item_uuid) + self.item_uuid = item_uuid + zones = db.zone_get_all(context) if not zones: raise exception.InstanceNotFound(instance_id=item_uuid) @@ -342,9 +350,13 @@ class reroute_compute(object): dict {'server':{k:v}}. Others may return a list of them, like {'servers':[{k,v}]}""" reduced_response = [] + found_exception = None for zone_response in zone_responses: if not zone_response: continue + if isinstance(zone_response, BaseException): + found_exception = zone_response + continue server = zone_response.__dict__ @@ -355,7 +367,9 @@ class reroute_compute(object): reduced_response.append(dict(server=server)) if reduced_response: return reduced_response[0] # first for now. 
- return {} + elif found_exception: + raise found_exception + exception.InstanceNotFound(instance_id=self.item_uuid) def redirect_handler(f): -- cgit From bc84c1306e9334d4082cadee4dcb5cd14a905afe Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 7 Sep 2011 22:57:36 -0700 Subject: pep8 fix for tests/api/openstack/test_servers.py which is an issue in trunk --- nova/tests/api/openstack/test_servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 2ef687709..d063a60c2 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3615,7 +3615,7 @@ class TestGetKernelRamdiskFromImage(test.TestCase): self.assertRaises(exception.NotFound, self._get_k_r, image_meta) def test_ami_no_ramdisk(self): - """If an ami is missing a ramdisk, return kernel ID and None for + """If an ami is missing a ramdisk, return kernel ID and None for ramdisk ID """ image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', -- cgit From be645283c85d69e2d3cf4f4eabdbb545aaf139bf Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 7 Sep 2011 23:24:07 -0700 Subject: create a new exception ZoneRequestError to use for returning errors when zone requests couldn't complete --- nova/exception.py | 7 +++++++ nova/scheduler/api.py | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/nova/exception.py b/nova/exception.py index 95d8229b5..aa6609461 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -806,3 +806,10 @@ class CannotResizeToSmallerSize(NovaException): class ImageTooLarge(NovaException): message = _("Image is larger than instance type allows") + + +class ZoneRequestError(Error): + def __init__(self, message=None): + if message is None: + message = _("1 or more Zones could not complete the request") + super(ZoneRequestError, self).__init__(message=message) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index a5124678d..05685fc15 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -168,7 +168,10 @@ def child_zone_helper(zone_list, func): url = zone.api_url LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") % locals()) - return e + # This is being returned instead of raised, so that when results are + # processed in unmarshal_result() after the greenpool.imap completes, + # the exception can be raised there if no other zones had a response. + return exception.ZoneRequestError() else: return func(nova, zone) -- cgit From d1b1f301583fd67050c45f4c863733f251620a9c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 7 Sep 2011 23:39:39 -0700 Subject: typo trying to raise InstanceNotFound when all zones returned nothing --- nova/scheduler/api.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 05685fc15..6081c3f02 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -168,9 +168,10 @@ def child_zone_helper(zone_list, func): url = zone.api_url LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") % locals()) - # This is being returned instead of raised, so that when results are - # processed in unmarshal_result() after the greenpool.imap completes, - # the exception can be raised there if no other zones had a response. 
+ # This is being returned instead of raised, so that when + # results are # processed in unmarshal_result() after the + # greenpool.imap completes, # the exception can be raised + # there if no other zones had a response. return exception.ZoneRequestError() else: return func(nova, zone) @@ -372,7 +373,7 @@ class reroute_compute(object): return reduced_response[0] # first for now. elif found_exception: raise found_exception - exception.InstanceNotFound(instance_id=self.item_uuid) + raise exception.InstanceNotFound(instance_id=self.item_uuid) def redirect_handler(f): -- cgit From be7a081976d37b84d93028673d08ab78bc9d8a73 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 7 Sep 2011 23:45:11 -0700 Subject: comment fix --- nova/scheduler/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 6081c3f02..719437b73 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -169,8 +169,8 @@ def child_zone_helper(zone_list, func): LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") % locals()) # This is being returned instead of raised, so that when - # results are # processed in unmarshal_result() after the - # greenpool.imap completes, # the exception can be raised + # results are processed in unmarshal_result() after the + # greenpool.imap completes, the exception can be raised # there if no other zones had a response. return exception.ZoneRequestError() else: -- cgit From 973870c82445d4c1ebbd46f2ba7c3817ae5e7f87 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 8 Sep 2011 01:09:22 -0700 Subject: added tests for failure cases talking with zones --- nova/tests/scheduler/test_scheduler.py | 102 +++++++++++++++++++++++++++++++-- 1 file changed, 97 insertions(+), 5 deletions(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index a52dd041a..890348192 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -963,9 +963,14 @@ class FakeZone(object): self.password = password +ZONE_API_URL1 = "http://1.example.com" +ZONE_API_URL2 = "http://2.example.com" + + def zone_get_all(context): return [ - FakeZone(1, 'http://example.com', 'bob', 'xxx'), + FakeZone(1, ZONE_API_URL1, 'bob', 'xxx'), + FakeZone(2, ZONE_API_URL2, 'bob', 'xxx'), ] @@ -1065,7 +1070,9 @@ class ZoneRedirectTest(test.TestCase): def test_unmarshal_single_server(self): decorator = api.reroute_compute("foo") - self.assertEquals(decorator.unmarshall_result([]), {}) + decorator.item_uuid = 'fake_uuid' + self.assertRaises(exception.InstanceNotFound, + decorator.unmarshall_result, []) self.assertEquals(decorator.unmarshall_result( [FakeResource(dict(a=1, b=2)), ]), dict(server=dict(a=1, b=2))) @@ -1079,6 +1086,90 @@ class ZoneRedirectTest(test.TestCase): [FakeResource(dict(_a=1, manager=2)), ]), dict(server={})) + def test_one_zone_down_no_instances(self): + + def _fake_issue_novaclient_command(nova, zone, *args, **kwargs): + return None + + class FakeNovaClientWithFailure(object): + def __init__(self, username, password, method, api_url): + self.api_url = api_url + + def authenticate(self): + if self.api_url == ZONE_API_URL2: + raise novaclient_exceptions.BadRequest('foo') + + self.stubs.Set(api, '_issue_novaclient_command', + _fake_issue_novaclient_command) + self.stubs.Set(api.novaclient, 'Client', FakeNovaClientWithFailure) + + @api.reroute_compute("get") + def do_get(self, context, uuid): + pass + + self.assertRaises(exception.ZoneRequestError, + do_get, 
None, {}, FAKE_UUID) + + def test_one_zone_down_got_instance(self): + + def _fake_issue_novaclient_command(nova, zone, *args, **kwargs): + class FakeServer(object): + def __init__(self): + self.id = FAKE_UUID + self.test = '1234' + return FakeServer() + + class FakeNovaClientWithFailure(object): + def __init__(self, username, password, method, api_url): + self.api_url = api_url + + def authenticate(self): + if self.api_url == ZONE_API_URL2: + raise novaclient_exceptions.BadRequest('foo') + + self.stubs.Set(api, '_issue_novaclient_command', + _fake_issue_novaclient_command) + self.stubs.Set(api.novaclient, 'Client', FakeNovaClientWithFailure) + + @api.reroute_compute("get") + def do_get(self, context, uuid): + pass + + try: + do_get(None, {}, FAKE_UUID) + except api.RedirectResult, e: + results = e.results + self.assertIn('server', results) + self.assertEqual(results['server']['id'], FAKE_UUID) + self.assertEqual(results['server']['test'], '1234') + except Exception, e: + self.fail(_("RedirectResult should have been raised")) + else: + self.fail(_("RedirectResult should have been raised")) + + def test_zones_up_no_instances(self): + + def _fake_issue_novaclient_command(nova, zone, *args, **kwargs): + return None + + class FakeNovaClientNoFailure(object): + def __init__(self, username, password, method, api_url): + pass + + def authenticate(self): + return + + self.stubs.Set(api, '_issue_novaclient_command', + _fake_issue_novaclient_command) + self.stubs.Set(api.novaclient, 'Client', FakeNovaClientNoFailure) + + @api.reroute_compute("get") + def do_get(self, context, uuid): + pass + + self.assertRaises(exception.InstanceNotFound, + do_get, None, {}, FAKE_UUID) + class FakeServerCollection(object): def get(self, instance_id): @@ -1097,7 +1188,7 @@ class FakeEmptyServerCollection(object): class FakeNovaClient(object): - def __init__(self, collection): + def __init__(self, collection, *args, **kwargs): self.servers = collection @@ -1162,8 +1253,9 @@ class CallZoneMethodTest(test.TestCase): context = {} method = 'do_something' results = api.call_zone_method(context, method) - expected = [(1, 42)] - self.assertEqual(expected, results) + self.assertEqual(len(results), 2) + self.assertIn((1, 42), results) + self.assertIn((2, 42), results) def test_call_zone_method_not_present(self): context = {} -- cgit From 98a5e149395ee58d1830d4671e2c70aaf0008585 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 8 Sep 2011 04:45:04 -0700 Subject: fixes vncproxy service listening on rabbit --- bin/nova-vncproxy | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy index dc08e2433..8e75451cb 100755 --- a/bin/nova-vncproxy +++ b/bin/nova-vncproxy @@ -107,10 +107,13 @@ if __name__ == "__main__": else: with_auth = auth.VNCNovaAuthMiddleware(with_logging) - server = wsgi.Server("VNC Proxy", - with_auth, - host=FLAGS.vncproxy_host, - port=FLAGS.vncproxy_port) - server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host) - service.serve(server) + wsgi_server = wsgi.Server("VNC Proxy", + with_auth, + host=FLAGS.vncproxy_host, + port=FLAGS.vncproxy_port) + wsgi_server.start_tcp(handle_flash_socket_policy, + 843, + host=FLAGS.vncproxy_host) + server = service.Service.create(binary='nova-vncproxy') + service.serve(wsgi_server, server) service.wait() -- cgit From 212ec3af4b11a770d7e4cf7869b1865b0f067e5c Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Thu, 8 Sep 2011 10:52:29 -0400 Subject: Clean up shutdown of lxc containers --- 
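The change below looks up the container's backing device from the mount table before unmounting, instead of parsing 'losetup -a' afterwards. A sketch of that lookup with an invented helper name:

    def find_backing_device(mount_output, instance_name):
        # Scan `mount` output for the container and return its backing device.
        for line in mount_output.splitlines():
            if instance_name in line:
                return line.split()[0]
        return None

    # find_backing_device('/dev/loop0 on /mnt/instance-1/rootfs type ext3 (rw)',
    #                     'instance-1')
    # -> '/dev/loop0'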
nova/virt/disk.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 52b2881e8..12a3a64ca 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -148,16 +148,17 @@ def destroy_container(target, instance, nbd=False): LXC does not support qcow2 images yet. """ + out, err = utils.execute('mount', run_as_root=True) + for loop in out.splitlines(): + if instance['name'] in loop: + device = loop.split()[0] + try: container_dir = '%s/rootfs' % target utils.execute('umount', container_dir, run_as_root=True) - finally: - out, err = utils.execute('losetup', '-a', run_as_root=True) - for loop in out.splitlines(): - if instance['name'] in loop: - device = loop.split(loop, ':') - _unlink_device(device, nbd) - + _unlink_device(device, nbd) + except Exception, exn: + LOG.exception(_('Failed to remove container: %s'), exn) def _link_device(image, nbd): """Link image to device using loopback or nbd""" -- cgit From 6cbbdb909443a33c2af8ddd73b861cd41201fa0b Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 8 Sep 2011 11:32:11 -0400 Subject: adding can_read_deleted back to db api --- nova/db/sqlalchemy/api.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 523258841..1730b4ddb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1277,6 +1277,10 @@ def instance_get_all_by_filters(context, filters): changes_since = filters['changes-since'] query_prefix = query_prefix.\ filter(models.Instance.updated_at > changes_since) + else: + # filter out deleted instances if no changes-since filter provided + query_prefix = query_prefix.\ + filter_by(deleted=can_read_deleted(context)) if not context.is_admin: # If we're not admin context, add appropriate filter.. -- cgit From 763bf3f1282e3d9723a356d4014a9599601637eb Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 8 Sep 2011 11:43:43 -0500 Subject: Do not attempt to mount the swap VDI for file injection. --- nova/tests/api/openstack/test_servers.py | 2 +- nova/virt/xenapi/vmops.py | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 2ef687709..d063a60c2 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3615,7 +3615,7 @@ class TestGetKernelRamdiskFromImage(test.TestCase): self.assertRaises(exception.NotFound, self._get_k_r, image_meta) def test_ami_no_ramdisk(self): - """If an ami is missing a ramdisk, return kernel ID and None for + """If an ami is missing a ramdisk, return kernel ID and None for ramdisk ID """ image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c5f105f40..bb53a52bc 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -188,9 +188,16 @@ class VMOps(object): ramdisk = VMHelper.fetch_image(context, self._session, instance, instance.ramdisk_id, instance.user_id, instance.project_id, ImageType.RAMDISK)[0] - # Create the VM ref and attach the first disk - first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', - vdis[0]['vdi_uuid']) + + # NOTE(jk0): Since vdi_type may contain either 'os' or 'swap', we + # need to ensure that the 'swap' VDI is not chosen as the mount + # point for file injection. 
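The loop added below walks the VDI list and skips entries tagged as swap, so file injection always lands on the root disk. A minimal sketch of the intent (the patch itself assigns inside the loop without breaking), assuming each VDI record is a dict that may carry a 'vdi_type' of 'os' or 'swap':

    def pick_root_vdi(vdis):
        # Return the first VDI that is not the swap disk, or None.
        for vdi in vdis:
            if vdi.get('vdi_type') != 'swap':
                return vdi
        return None

    # pick_root_vdi([{'vdi_type': 'swap', 'vdi_uuid': 'u0'},
    #                {'vdi_type': 'os', 'vdi_uuid': 'u1'}])
    # -> {'vdi_type': 'os', 'vdi_uuid': 'u1'}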
+ first_vdi_ref = None + for vdi in vdis: + if vdi['vdi_type'] != 'swap': + # Create the VM ref and attach the first disk + first_vdi_ref = self._session.call_xenapi( + 'VDI.get_by_uuid', vdi['vdi_uuid']) vm_mode = instance.vm_mode and instance.vm_mode.lower() if vm_mode == 'pv': -- cgit From 3f6738b9f07640b0950793975cfc55e62aa3e1ad Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 8 Sep 2011 12:11:39 -0500 Subject: Use .get instead. --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index bb53a52bc..9c138ee41 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -194,7 +194,7 @@ class VMOps(object): # point for file injection. first_vdi_ref = None for vdi in vdis: - if vdi['vdi_type'] != 'swap': + if vdi.get('vdi_type') != 'swap': # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi( 'VDI.get_by_uuid', vdi['vdi_uuid']) -- cgit From c8a48eec1fb9f205af5cef2b882fc171bcca4d57 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Thu, 8 Sep 2011 19:08:46 +0000 Subject: Add a NOTE() --- nova/scheduler/abstract_scheduler.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index be6267d3a..1ba10c3a9 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -123,6 +123,14 @@ class AbstractScheduler(driver.Scheduler): except novaclient_exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " "to talk to zone at %(url)s.") % locals()) + # NOTE(Vek): Novaclient has two different calling conventions + # for this call, depending on whether you're using + # 1.0 or 1.1 API: in 1.0, there's an ipgroups + # argument after flavor_id which isn't present in + # 1.1. To work around this, all the extra + # arguments are passed as keyword arguments + # (there's a reasonable default for ipgroups in the + # novaclient call). 
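A hedged illustration of the point in the NOTE above, with made-up signatures rather than novaclient's real ones: the extra positional parameter in the 1.0-style create() only matters when arguments are passed positionally, so supplying the optional values as keywords works against either signature:

    def create_v1_0(name, image, flavor, ipgroup=None, meta=None, files=None):
        return (name, image, flavor, ipgroup, meta, files)

    def create_v1_1(name, image, flavor, meta=None, files=None):
        return (name, image, flavor, meta, files)

    # The same keyword-style call is valid for both:
    create_v1_0('server1', 'image1', '1', meta={}, files=None)
    create_v1_1('server1', 'image1', '1', meta={}, files=None)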
nova.servers.create(name, image_ref, flavor_id, meta=meta, files=files, zone_blob=child_blob, reservation_id=reservation_id) -- cgit From 4bf6508a026c62a7aa2423b1910c871ddc3f0916 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 8 Sep 2011 15:26:44 -0400 Subject: converting fix to just address ec2; updating test --- nova/api/ec2/cloud.py | 4 +++- nova/db/sqlalchemy/api.py | 4 ---- nova/tests/test_cloud.py | 4 +--- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 049ca6f93..4f7030a5a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1200,8 +1200,10 @@ class CloudController(object): instances.append(instance) else: try: + # always filter out deleted instances + search_opts['deleted'] = False instances = self.compute_api.get_all(context, - search_opts=search_opts) + search_opts=search_opts) except exception.NotFound: instances = [] for instance in instances: diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1730b4ddb..523258841 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1277,10 +1277,6 @@ def instance_get_all_by_filters(context, filters): changes_since = filters['changes-since'] query_prefix = query_prefix.\ filter(models.Instance.updated_at > changes_since) - else: - # filter out deleted instances if no changes-since filter provided - query_prefix = query_prefix.\ - filter_by(deleted=can_read_deleted(context)) if not context.is_admin: # If we're not admin context, add appropriate filter.. diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 3fe6a9b42..7fe353b3d 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -486,11 +486,9 @@ class CloudTestCase(test.TestCase): inst2 = db.instance_create(self.context, args2) db.instance_destroy(self.context, inst1.id) result = self.cloud.describe_instances(self.context) + self.assertEqual(len(result['reservationSet']), 1) result1 = result['reservationSet'][0]['instancesSet'] self.assertEqual(result1[0]['instanceId'], - ec2utils.id_to_ec2_id(inst1.id)) - result2 = result['reservationSet'][1]['instancesSet'] - self.assertEqual(result2[0]['instanceId'], ec2utils.id_to_ec2_id(inst2.id)) def _block_device_mapping_create(self, instance_id, mappings): -- cgit From fe355a10ad0a215eb5295e46e6c106221972e7ed Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 8 Sep 2011 13:18:08 -0700 Subject: make check for fixed_ip association more defensive --- nova/api/openstack/contrib/floating_ips.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 9ceb5858f..d078b26c6 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -107,7 +107,7 @@ class FloatingIPController(object): context = req.environ['nova.context'] floating_ip = self.network_api.get_floating_ip(context, id) - if floating_ip['fixed_ip']: + if floating_ip.get('fixed_ip'): self.network_api.disassociate_floating_ip(context, floating_ip['address']) @@ -161,7 +161,7 @@ class Floating_ips(extensions.ExtensionDescriptor): raise webob.exc.HTTPBadRequest(explanation=msg) floating_ip = self.network_api.get_floating_ip_by_ip(context, address) - if floating_ip['fixed_ip']: + if floating_ip.get('fixed_ip'): self.network_api.disassociate_floating_ip(context, address) return webob.Response(status_int=202) -- cgit From aec647b3b42c4cd56a9509c2d1ac25ff12b0664e Mon Sep 17 00:00:00 2001 
From: Josh Kearney Date: Thu, 8 Sep 2011 16:10:03 -0500 Subject: First pass at adding reboot_type to reboot codepath. --- nova/api/openstack/servers.py | 5 ++--- nova/compute/api.py | 5 +++-- nova/compute/manager.py | 4 ++-- nova/tests/api/openstack/test_servers.py | 2 +- nova/tests/test_compute.py | 15 ++++++++++++--- nova/tests/test_virt_drivers.py | 3 ++- nova/tests/test_vmwareapi.py | 3 ++- nova/virt/driver.py | 3 ++- nova/virt/fake.py | 2 +- nova/virt/hyperv.py | 2 +- nova/virt/vmwareapi_conn.py | 2 +- nova/virt/xenapi/vmops.py | 9 +++++++-- nova/virt/xenapi_conn.py | 4 ++-- 13 files changed, 38 insertions(+), 21 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index d084ac360..f5447edc5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -334,9 +334,8 @@ class Controller(object): LOG.exception(msg) raise exc.HTTPBadRequest(explanation=msg) try: - # TODO(gundlach): pass reboot_type, support soft reboot in - # virt driver - self.compute_api.reboot(req.environ['nova.context'], id) + self.compute_api.reboot(req.environ['nova.context'], id, + reboot_type) except Exception, e: LOG.exception(_("Error in reboot %s"), e) raise exc.HTTPUnprocessableEntity() diff --git a/nova/compute/api.py b/nova/compute/api.py index 4e2944bb7..b0ea044c5 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1042,13 +1042,14 @@ class API(base.Base): return recv_meta @scheduler_api.reroute_compute("reboot") - def reboot(self, context, instance_id): + def reboot(self, context, instance_id, reboot_type): """Reboot the given instance.""" self.update(context, instance_id, vm_state=vm_states.ACTIVE, task_state=task_states.REBOOTING) - self._cast_compute_message('reboot_instance', context, instance_id) + self._cast_compute_message('reboot_instance', context, instance_id, + reboot_type) @scheduler_api.reroute_compute("rebuild") def rebuild(self, context, instance_id, image_href, admin_password, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0477db745..0be12297f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -579,7 +579,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock - def reboot_instance(self, context, instance_id): + def reboot_instance(self, context, instance_id, reboot_type="SOFT"): """Reboot an instance on this host.""" LOG.audit(_("Rebooting instance %s"), instance_id, context=context) context = context.elevated() @@ -601,7 +601,7 @@ class ComputeManager(manager.SchedulerDependentManager): context=context) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.reboot(instance_ref, network_info) + self.driver.reboot(instance_ref, network_info, reboot_type) current_power_state = self._get_power_state(context, instance_ref) self._instance_update(context, diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 2ef687709..d063a60c2 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3615,7 +3615,7 @@ class TestGetKernelRamdiskFromImage(test.TestCase): self.assertRaises(exception.NotFound, self._get_k_r, image_meta) def test_ami_no_ramdisk(self): - """If an ami is missing a ramdisk, return kernel ID and None for + """If an ami is missing a ramdisk, return kernel ID and None for ramdisk ID """ image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', diff 
--git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 65fdffbd6..4d463572b 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -300,11 +300,20 @@ class ComputeTestCase(test.TestCase): self.compute.resume_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) - def test_reboot(self): - """Ensure instance can be rebooted""" + def test_soft_reboot(self): + """Ensure instance can be soft rebooted""" instance_id = self._create_instance() + reboot_type = "SOFT" self.compute.run_instance(self.context, instance_id) - self.compute.reboot_instance(self.context, instance_id) + self.compute.reboot_instance(self.context, instance_id, reboot_type) + self.compute.terminate_instance(self.context, instance_id) + + def test_hard_reboot(self): + """Ensure instance can be hard rebooted""" + instance_id = self._create_instance() + reboot_type = "HARD" + self.compute.run_instance(self.context, instance_id) + self.compute.reboot_instance(self.context, instance_id, reboot_type) self.compute.terminate_instance(self.context, instance_id) def test_set_admin_password(self): diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py index 480247c91..440d3401b 100644 --- a/nova/tests/test_virt_drivers.py +++ b/nova/tests/test_virt_drivers.py @@ -103,8 +103,9 @@ class _VirtDriverTestCase(test.TestCase): def test_reboot(self): instance_ref = test_utils.get_test_instance() network_info = test_utils.get_test_network_info() + reboot_type = "SOFT" self.connection.spawn(self.ctxt, instance_ref, network_info) - self.connection.reboot(instance_ref, network_info) + self.connection.reboot(instance_ref, network_info, reboot_type) @catch_notimplementederror def test_get_host_ip_addr(self): diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 06daf46e8..e6da1690f 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -170,7 +170,8 @@ class VMWareAPIVMTestCase(test.TestCase): self._create_vm() info = self.conn.get_info(1) self._check_vm_info(info, power_state.RUNNING) - self.conn.reboot(self.instance, self.network_info) + reboot_type = "SOFT" + self.conn.reboot(self.instance, self.network_info, reboot_type) info = self.conn.get_info(1) self._check_vm_info(info, power_state.RUNNING) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index d05b51bd9..301346c6b 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -165,12 +165,13 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def reboot(self, instance, network_info): + def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance. :param instance: Instance object as returned by DB layer. 
:param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param reboot_type: Either a HARD or SOFT reboot """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index d5e2bf31b..3596d8353 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -103,7 +103,7 @@ class FakeConnection(driver.ComputeDriver): if not instance['name'] in self.instances: raise exception.InstanceNotRunning() - def reboot(self, instance, network_info): + def reboot(self, instance, network_info, reboot_type): pass def get_host_ip_addr(self): diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 03a78db1f..76925b405 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -367,7 +367,7 @@ class HyperVConnection(driver.ComputeDriver): wmi_obj.Properties_.Item(prop).Value return newinst - def reboot(self, instance, network_info): + def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance.""" vm = self._lookup(instance.name) if vm is None: diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py index 243ee64f5..fa89a8f45 100644 --- a/nova/virt/vmwareapi_conn.py +++ b/nova/virt/vmwareapi_conn.py @@ -133,7 +133,7 @@ class VMWareESXConnection(driver.ComputeDriver): """Create snapshot from a running VM instance.""" self._vmops.snapshot(context, instance, name) - def reboot(self, instance, network_info): + def reboot(self, instance, network_info, reboot_type): """Reboot VM instance.""" self._vmops.reboot(instance, network_info) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c5f105f40..b7d6a40b4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -617,10 +617,15 @@ class VMOps(object): str(new_disk_size)) LOG.debug(_("Resize instance %s complete") % (instance.name)) - def reboot(self, instance): + def reboot(self, instance, reboot_type): """Reboot VM instance.""" vm_ref = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref) + + if reboot_type == "HARD": + task = self._session.call_xenapi('Async.VM.hard_reboot', vm_ref) + else: + task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref) + self._session.wait_for_task(task, instance.id) def get_agent_version(self, instance, timeout=None): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 0d23e7689..f6dbc19f8 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -203,9 +203,9 @@ class XenAPIConnection(driver.ComputeDriver): """ Create snapshot from a running VM instance """ self._vmops.snapshot(context, instance, image_id) - def reboot(self, instance, network_info): + def reboot(self, instance, network_info, reboot_type): """Reboot VM instance""" - self._vmops.reboot(instance) + self._vmops.reboot(instance, reboot_type) def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance""" -- cgit From a8acd5fbd55ad4cd2cd502e95b2399657fbd162f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Sep 2011 16:02:10 -0700 Subject: if no public-key is given (--key), do not show public-keys in metadata service --- nova/api/ec2/cloud.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 049ca6f93..c6f24858a 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -304,11 +304,6 @@ class CloudController(object): instance_ref = 
db.instance_get(ctxt, instance_ref[0]['id']) mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) - if instance_ref['key_name']: - keys = {'0': {'_name': instance_ref['key_name'], - 'openssh-key': instance_ref['key_data']}} - else: - keys = '' hostname = instance_ref['hostname'] host = instance_ref['host'] availability_zone = self._get_availability_zone_by_host(ctxt, host) @@ -336,11 +331,15 @@ class CloudController(object): 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', - 'public-keys': keys, 'reservation-id': instance_ref['reservation_id'], 'security-groups': security_groups, 'mpi': mpi}} + # public-keys should only show up if it is non-empty (if user specified one) + if instance_ref['key_name']: + data['keys'] = {'0': {'_name': instance_ref['key_name'], + 'openssh-key': instance_ref['key_data']}} + for image_type in ['kernel', 'ramdisk']: if instance_ref.get('%s_id' % image_type): ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type], -- cgit From f2c887824cd56fe83f4db2bf94279684a1daba05 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Sep 2011 17:42:49 -0700 Subject: metadata key is 'public-keys', not 'keys' --- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c6f24858a..c8ff47864 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -337,7 +337,7 @@ class CloudController(object): # public-keys should only show up if it is non-empty (if user specified one) if instance_ref['key_name']: - data['keys'] = {'0': {'_name': instance_ref['key_name'], + data['public-keys'] = {'0': {'_name': instance_ref['key_name'], 'openssh-key': instance_ref['key_data']}} for image_type in ['kernel', 'ramdisk']: -- cgit From 3202b7a9193796170fbb25a793e40ff14f9b9621 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Sep 2011 18:02:02 -0700 Subject: put key into meta-data, not top level 'data' --- nova/api/ec2/cloud.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c8ff47864..bb860a131 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -337,8 +337,9 @@ class CloudController(object): # public-keys should only show up if it is non-empty (if user specified one) if instance_ref['key_name']: - data['public-keys'] = {'0': {'_name': instance_ref['key_name'], - 'openssh-key': instance_ref['key_data']}} + data['meta-data']['public-keys'] = { + '0': {'_name': instance_ref['key_name'], + 'openssh-key': instance_ref['key_data']}} for image_type in ['kernel', 'ramdisk']: if instance_ref.get('%s_id' % image_type): -- cgit From b890b992f3013a1959e3c3cdf1f149cacf4e569b Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 8 Sep 2011 21:30:21 -0400 Subject: Fixing security groups stuff --- nova/api/openstack/servers.py | 20 ++++------ .../api/openstack/contrib/test_createserverext.py | 6 ++- nova/tests/api/openstack/test_versions.py | 43 +++++++--------------- 3 files changed, 26 insertions(+), 43 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 6159041e8..7532313e5 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -920,6 +920,13 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): addresses_elem = self._create_addresses_node( server_dict.get('addresses', {})) server_elem.append(addresses_elem) + groups = server_dict.get('security_groups') + if groups: + 
groups_elem = etree.SubElement(server_elem, 'security_groups') + for group in groups: + group_elem = etree.SubElement(groups_elem, + 'security_group') + group_elem.set('name', group['name']) for link in server_dict.get('links', []): elem = etree.SubElement(server_elem, @@ -963,19 +970,6 @@ class ServerXMLSerializer(wsgi.XMLDictSerializer): self._populate_server(server, server_dict['server'], True) return self._to_xml(server) - def _security_group_to_xml(self, doc, security_group): - node = doc.createElement('security_group') - node.setAttribute('name', str(security_group.get('name'))) - return node - - def _create_security_groups_node(self, xml_doc, security_groups): - security_groups_node = xml_doc.createElement('security_groups') - if security_groups: - for security_group in security_groups: - node = self._security_group_to_xml(xml_doc, security_group) - security_groups_node.appendChild(node) - return security_groups_node - def create_resource(version='1.0'): controller = { diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index 078b72d67..03c7d1ec5 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -49,9 +49,13 @@ INSTANCE = { "id": 1, "display_name": "test_server", "uuid": FAKE_UUID, + "user_id": 'fake_user_id', + "tenant_id": 'fake_tenant_id', "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), - "security_groups": [{"id": 1, "name": "test"}] + "security_groups": [{"id": 1, "name": "test"}], + "image_ref": 'http://foo.com/123', + "instance_type": {"flavorid": '124'}, } diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 8b60db71e..686752509 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -23,10 +23,11 @@ from lxml import etree from nova import context from nova import test -from nova.tests.api.openstack import fakes from nova.api.openstack import versions from nova.api.openstack import views from nova.api.openstack import wsgi +from nova.tests.api.openstack import common +from nova.tests.api.openstack import fakes NS = { 'atom': 'http://www.w3.org/2005/Atom', @@ -243,12 +244,12 @@ class VersionsTest(test.TestCase): self.assertTrue(version.xpath('/ns:version', namespaces=NS)) media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) - self.assertTrue(_compare_media_types(media_types, + self.assertTrue(common.compare_media_types(media_types, expected['media-types'])) for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) links = version.xpath('atom:link', namespaces=NS) - self.assertTrue(_compare_links(links, + self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v1.0/'}] + expected['links'])) @@ -264,12 +265,12 @@ class VersionsTest(test.TestCase): self.assertTrue(version.xpath('/ns:version', namespaces=NS)) media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) - self.assertTrue(_compare_media_types(media_types, + self.assertTrue(common.compare_media_types(media_types, expected['media-types'])) for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) links = version.xpath('atom:link', namespaces=NS) - self.assertTrue(_compare_links(links, + self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 
'http://localhost/v1.1/'}] + expected['links'])) @@ -291,7 +292,7 @@ class VersionsTest(test.TestCase): for key in ['id', 'status', 'updated']: self.assertEqual(version.get(key), expected[key]) (link,) = version.xpath('atom:link', namespaces=NS) - self.assertTrue(_compare_links(link, + self.assertTrue(common.compare_links(link, [{'rel': 'self', 'href': 'http://localhost/%s/' % v}])) def test_get_version_1_0_detail_atom(self): @@ -496,10 +497,10 @@ class VersionsTest(test.TestCase): self.assertEqual(version.get('status'), 'CURRENT') media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) - self.assertTrue(_compare_media_types(media_types, + self.assertTrue(common.compare_media_types(media_types, VERSIONS['v1.1']['media-types'])) links = version.xpath('atom:link', namespaces=NS) - self.assertTrue(_compare_links(links, + self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v1.1/images/1'}])) version = versions[1] @@ -507,10 +508,10 @@ class VersionsTest(test.TestCase): self.assertEqual(version.get('status'), 'DEPRECATED') media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) - self.assertTrue(_compare_media_types(media_types, + self.assertTrue(common.compare_media_types(media_types, VERSIONS['v1.0']['media-types'])) links = version.xpath('atom:link', namespaces=NS) - self.assertTrue(_compare_links(links, + self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v1.0/images/1'}])) def test_multi_choice_server_atom(self): @@ -654,7 +655,7 @@ class VersionsSerializerTests(test.TestCase): versions_data['versions'][0]['status']) (link,) = version.xpath('atom:link', namespaces=NS) - self.assertTrue(_compare_links(link, [{ + self.assertTrue(common.compare_links(link, [{ 'rel': 'self', 'href': 'http://test/2.7.1', 'type': 'application/atom+xml'}])) @@ -693,11 +694,11 @@ class VersionsSerializerTests(test.TestCase): media_types = version.xpath('ns:media-types/ns:media-type', namespaces=NS) - self.assertTrue(_compare_media_types(media_types, + self.assertTrue(common.compare_media_types(media_types, versions_data['choices'][0]['media-types'])) (link,) = version.xpath('atom:link', namespaces=NS) - self.assertTrue(_compare_links(link, + self.assertTrue(common.compare_links(link, versions_data['choices'][0]['links'])) def test_version_detail_xml_serializer(self): @@ -882,19 +883,3 @@ class VersionsSerializerTests(test.TestCase): 'servers/api/v1.1/application.wadl', }, ]) - - -def _compare_links(actual, expected): - return _compare_tree_to_dict(actual, expected, ('rel', 'href', 'type')) - - -def _compare_media_types(actual, expected): - return _compare_tree_to_dict(actual, expected, ('base', 'type')) - - -def _compare_tree_to_dict(actual, expected, keys): - for elem, data in zip(actual, expected): - for key in keys: - if elem.get(key) != data.get(key): - return False - return True -- cgit From 13ee200d8265175922b5747f9e00fc31db4803fd Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Thu, 8 Sep 2011 21:37:40 -0400 Subject: pep 8 --- nova/tests/api/openstack/test_servers.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index c3203eb43..9d0f9c4f7 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -496,7 +496,8 @@ class ServersTest(test.TestCase): self.assertEqual(image.get('id'), str(expected['image']['id'])) links = 
root.xpath('ns:image/atom:link', namespaces=XPATH_NS) - self.assertTrue(common.compare_links(links, expected['image']['links'])) + self.assertTrue(common.compare_links(links, + expected['image']['links'])) (flavor,) = root.xpath('ns:flavor', namespaces=XPATH_NS) self.assertEqual(flavor.get('id'), str(expected['flavor']['id'])) @@ -3663,7 +3664,7 @@ class TestGetKernelRamdiskFromImage(test.TestCase): self.assertRaises(exception.NotFound, self._get_k_r, image_meta) def test_ami_no_ramdisk(self): - """If an ami is missing a ramdisk, return kernel ID and None for + """If an ami is missing a ramdisk, return kernel ID and None for ramdisk ID """ image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', -- cgit From 4ffe41dffaab64c96649bfc3236e5ba6bb9d8b37 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 9 Sep 2011 09:27:26 +0200 Subject: Open Essex (switch version to 2012.1) --- nova/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/version.py b/nova/version.py index 1f8d08e8c..06810df46 100644 --- a/nova/version.py +++ b/nova/version.py @@ -22,7 +22,7 @@ except ImportError: 'revno': 0} -NOVA_VERSION = ['2011', '3'] +NOVA_VERSION = ['2012', '1'] YEAR, COUNT = NOVA_VERSION FINAL = False # This becomes true at Release Candidate time -- cgit From 7352e3e1eb7a4d29b556492a208e80439828f211 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 9 Sep 2011 09:48:38 -0400 Subject: removing key_name and config_drive from non-detailed server entity --- nova/api/openstack/views/servers.py | 7 ++----- nova/tests/api/openstack/test_servers.py | 4 ---- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index ac09b5864..473dc9e7e 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -145,6 +145,8 @@ class ViewBuilderV11(ViewBuilder): response['server']['accessIPv4'] = inst.get('access_ip_v4') or "" response['server']['accessIPv6'] = inst.get('access_ip_v6') or "" + response['server']['key_name'] = inst.get('key_name', '') + response['server']['config_drive'] = inst.get('config_drive') return response @@ -185,8 +187,6 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) response['uuid'] = inst['uuid'] - response['key_name'] = inst.get('key_name', '') - self._build_config_drive(response, inst) def _build_links(self, response, inst): href = self.generate_href(inst["id"]) @@ -205,9 +205,6 @@ class ViewBuilderV11(ViewBuilder): response["links"] = links - def _build_config_drive(self, response, inst): - response['config_drive'] = inst.get('config_drive') - def generate_href(self, server_id): """Create an url that refers to a specific server id.""" return os.path.join(self.base_url, self.project_id, diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index d063a60c2..f0a1c5ce5 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3715,7 +3715,6 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", - "key_name": '', "links": [ { "rel": "self", @@ -3726,7 +3725,6 @@ class ServersViewBuilderV11Test(test.TestCase): "href": "http://localhost/servers/1", }, ], - "config_drive": None, } } @@ -3739,8 +3737,6 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", - "key_name": '', - 
"config_drive": None, "links": [ { "rel": "self", -- cgit From 9dd2d6c49a36c1834d0ef842c47d2ef400642ff2 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 9 Sep 2011 08:59:02 -0700 Subject: remove sudo from qemu-img commands --- nova/virt/libvirt/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 363a20ed0..19cef5ad7 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1696,7 +1696,7 @@ class LibvirtConnection(driver.ComputeDriver): base = os.path.basename(info['path']) # Get image type and create empty disk image. instance_disk = os.path.join(instance_dir, base) - utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'], + utils.execute('qemu-img', 'create', '-f', info['type'], instance_disk, info['local_gb']) # if image has kernel and ramdisk, just download @@ -1788,7 +1788,7 @@ class LibvirtConnection(driver.ComputeDriver): if disk_type == 'raw': size = int(os.path.getsize(path)) else: - out, err = utils.execute('sudo', 'qemu-img', 'info', path) + out, err = utils.execute('qemu-img', 'info', path) size = [i.split('(')[1].split()[0] for i in out.split('\n') if i.strip().find('virtual size') >= 0] size = int(size[0]) -- cgit From 5ddfd1c1add955aa14c5e5174b1942eb8f748031 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 9 Sep 2011 10:20:36 -0700 Subject: shorten comment to < 79 chars --- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index bb860a131..eafecbca8 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -335,7 +335,7 @@ class CloudController(object): 'security-groups': security_groups, 'mpi': mpi}} - # public-keys should only show up if it is non-empty (if user specified one) + # public-keys should be in meta-data only if user specified one if instance_ref['key_name']: data['meta-data']['public-keys'] = { '0': {'_name': instance_ref['key_name'], -- cgit From d05d4e77df0bdfd2b802186762391d7f91361701 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 9 Sep 2011 14:35:38 -0500 Subject: Add comment to document why random.shuffle() works --- nova/scheduler/abstract_scheduler.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index b4c2bf4f1..e4f615b94 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -296,6 +296,13 @@ class AbstractScheduler(driver.Scheduler): "child_blob": weighting["blob"]} weighted_hosts.append(host_dict) if FLAGS.spread_first: + # NOTE(Vek): If all the weights are unique, then the sort + # below undoes this shuffle; however, if + # several responses from several zones have the + # same weight, then this shuffle serves to + # break up the monolithic blocks and cause the + # instances to be uniformly spread across the + # zones. random.shuffle(weighted_hosts) weighted_hosts.sort(key=operator.itemgetter('weight')) return weighted_hosts -- cgit From c0700ea7bbb4d860610b71e635b8dbde19157e85 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 9 Sep 2011 20:27:22 +0000 Subject: don't need random in abstract_scheduler.py anymore... 
--- nova/scheduler/abstract_scheduler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index 76329bbc6..6e8c7d715 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -22,7 +22,6 @@ behavior is to simply select all hosts and weight them the same. import json import operator -import random import M2Crypto -- cgit From 65a0cc41b1b9ead5acd3128a4a6202bb02e3a6e5 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 9 Sep 2011 17:25:45 -0400 Subject: fixing image status mapping --- nova/api/openstack/views/images.py | 15 ++++++++------- nova/tests/api/openstack/test_images.py | 10 +++++----- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index 21f1b2d3e..8983b2957 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -37,17 +37,18 @@ class ViewBuilder(object): def _format_status(self, image): """Update the status field to standardize format.""" status_mapping = { - 'pending': 'QUEUED', - 'decrypting': 'PREPARING', - 'untarring': 'SAVING', - 'available': 'ACTIVE', - 'killed': 'FAILED', + 'active': 'ACTIVE', + 'queued': 'SAVING', + 'saving': 'SAVING', + 'deleted': 'DELETED', + 'pending_delete': 'DELETED', + 'killed': 'ERROR', } try: - image['status'] = status_mapping[image['status']].upper() + image['status'] = status_mapping[image['status']] except KeyError: - image['status'] = image['status'].upper() + image['status'] = 'UNKNOWN' def _build_server(self, image, image_obj): """Indicates that you must use a ViewBuilder subclass.""" diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2a7cfc382..46f763d5e 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -407,7 +407,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): "name": "queued snapshot", "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, - "status": "QUEUED", + "status": "SAVING", "progress": 0, 'server': { 'id': 42, @@ -603,7 +603,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'queued snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, - 'status': 'QUEUED', + 'status': 'SAVING', 'progress': 0, }, { @@ -627,7 +627,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'killed snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, - 'status': 'FAILED', + 'status': 'ERROR', 'progress': 0, }, { @@ -676,7 +676,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, - 'status': 'QUEUED', + 'status': 'SAVING', 'progress': 0, 'server': { 'id': 42, @@ -769,7 +769,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, - 'status': 'FAILED', + 'status': 'ERROR', 'progress': 0, 'server': { 'id': 42, -- cgit From c3cb1d38ca4a6f3308503c79e13e3e8688143163 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Fri, 9 Sep 2011 20:47:37 -0400 Subject: Fix spelling mistake --- nova/virt/libvirt/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 6ae458537..7c1edc373 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -196,7 +196,7 @@ 
class LibvirtConnection(driver.ComputeDriver): def _test_connection(self): try: - self._wrapped_conn.geCapabilities() + self._wrapped_conn.getCapabilities() return True except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ -- cgit From 248787462473195ab35591946ed6e3f0e2a818b0 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Sat, 10 Sep 2011 17:08:43 +0900 Subject: virt/libvirt: format ephemeral device and add fs label when formatting ext3 fs This patch fixes the bug reported by https://bugs.launchpad.net/bugs/827590 https://bugs.launchpad.net/nova/+bug/828357 The ephemeral device is formatted as ext3 on Amazon ec2. The new option, virt_mkfs, is introduced. virt_mkfs uses the format of <os_type>=<mkfs command> --- nova/virt/disk.py | 41 +++++++++++++++++++++++++++++++++++++++++ nova/virt/libvirt/connection.py | 13 +++++++++++-- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 52b2881e8..2c994626c 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -52,6 +52,47 @@ flags.DEFINE_integer('timeout_nbd', 10, flags.DEFINE_integer('max_nbd_devices', 16, 'maximum number of possible nbd devices') +# NOTE(yamahata): DEFINE_list() doesn't work because the command may +# include ','. For example, +# mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16 +# --label %(fs_label)s %(target)s +# +# DEFINE_list() parses its argument by +# [s.strip() for s in argument.split(self._token)] +# where self._token = ',' +# No escape nor exceptional handling for ','. +# DEFINE_list() doesn't give us what we need. +flags.DEFINE_multistring('virt_mkfs', + ['windows=mkfs.ntfs --fast --label %(fs_label)s ' + '%(target)s', + # NOTE(yamahata): vfat case + #'windows=mkfs.vfat -n %(fs_label)s %(target)s', + 'linux=mkfs.ext3 -L %(fs_label)s -F %(target)s', + 'default=mkfs.ext3 -L %(fs_label)s -F %(target)s'], + 'mkfs commands for ephemeral device. The format is' + '<os_type>=<mkfs command>') + + +_MKFS_COMMAND = {} +_DEFAULT_MKFS_COMMAND = None + + +for s in FLAGS.virt_mkfs: + # NOTE(yamahata): mkfs command may includes '=' for its options. + # So item.partition('=') doesn't work here + os_type, mkfs_command = s.split('=', 1) + if os_type: + _MKFS_COMMAND[os_type] = mkfs_command + if os_type == 'default': + _DEFAULT_MKFS_COMMAND = mkfs_command + + +def mkfs(os_type, fs_label, target): + mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or + '') % locals() + if mkfs_command: + utils.execute(*mkfs_command.split()) + def extend(image, size): """Increase image to size""" diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 19cef5ad7..6b740d995 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -38,6 +38,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
""" import hashlib +import functools import multiprocessing import netaddr import os @@ -778,6 +779,10 @@ class LibvirtConnection(driver.ComputeDriver): if fs_format: utils.execute('mkfs', '-t', fs_format, target) + def _create_ephemeral(self, target, local_size, fs_label, os_type): + self._create_local(target, local_size) + disk.mkfs(os_type, fs_label, target) + def _create_swap(self, target, swap_gb): """Create a swap file of specified size""" self._create_local(target, swap_gb) @@ -866,9 +871,13 @@ class LibvirtConnection(driver.ComputeDriver): local_size=local_gb) for eph in driver.block_device_info_get_ephemerals(block_device_info): - self._cache_image(fn=self._create_local, + fn = functools.partial(self._create_ephemeral, + fs_label='ephemeral%d' % eph['num'], + os_type=inst.os_type) + self._cache_image(fn=fn, target=basepath(_get_eph_disk(eph)), - fname="local_%s" % eph['size'], + fname="ephemeral_%s_%s_%s" % + (eph['num'], eph['size'], inst.os_type), cow=FLAGS.use_cow_images, local_size=eph['size']) -- cgit From c890890c7ccbc7df1060d59747089b5e39c5510a Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Sat, 10 Sep 2011 17:11:21 +0900 Subject: api/ec2: make get_metadata() return correct mappings The entries corresponding to volumes are in the form of ebs': --- nova/api/ec2/cloud.py | 20 ++++++++++++++++++-- nova/tests/test_cloud.py | 4 +++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 4f7030a5a..50c551f86 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -272,11 +272,17 @@ class CloudController(object): mappings = {} mappings['ami'] = block_device.strip_dev(root_device_name) mappings['root'] = root_device_name + ebs_devices = [] - # 'ephemeralN' and 'swap' + # 'ephemeralN', 'swap' and ebs for bdm in db.block_device_mapping_get_all_by_instance( ctxt, instance_ref['id']): - if (bdm['volume_id'] or bdm['snapshot_id'] or bdm['no_device']): + if bdm['no_device']: + continue + + # ebs volume case + if (bdm['volume_id'] or bdm['snapshot_id']): + ebs_devices.append(bdm['device_name']) continue virtual_name = bdm['virtual_name'] @@ -286,6 +292,16 @@ class CloudController(object): if block_device.is_swap_or_ephemeral(virtual_name): mappings[virtual_name] = bdm['device_name'] + # NOTE(yamahata): I'm not sure how ebs device should be numbered. + # Right now sort by device name for deterministic + # result. 
+ if ebs_devices: + nebs = 0 + ebs_devices.sort() + for ebs in ebs_devices: + mappings['ebs%d' % nebs] = ebs + nebs += 1 + return mappings def get_metadata(self, address): diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 7fe353b3d..7bdae0552 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -1540,7 +1540,9 @@ class CloudTestCase(test.TestCase): 'ephemeral0': '/dev/sdb', 'swap': '/dev/sdc', 'ephemeral1': '/dev/sdd', - 'ephemeral2': '/dev/sd3'} + 'ephemeral2': '/dev/sd3', + 'ebs0': '/dev/sdh', + 'ebs1': '/dev/sdi'} self.assertEqual(self.cloud._format_instance_mapping(ctxt, instance_ref0), -- cgit From d8abe79da8dde2667936ee97d88d30d5cf0e6d7f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Sat, 10 Sep 2011 17:11:31 +0900 Subject: api/ec2/ebs: make metadata returns correct swap and ephemeral0 --- nova/api/ec2/cloud.py | 6 +++ .../migrate_repo/versions/046_add_instance_swap.py | 48 ++++++++++++++++++++++ nova/db/sqlalchemy/models.py | 2 + nova/virt/libvirt/connection.py | 8 ++++ 4 files changed, 64 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 50c551f86..0ad2d94f3 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -272,6 +272,12 @@ class CloudController(object): mappings = {} mappings['ami'] = block_device.strip_dev(root_device_name) mappings['root'] = root_device_name + default_local_device = instance_ref.get('default_local_device') + if default_local_device: + mappings['ephemeral0'] = default_local_device + default_swap_device = instance_ref.get('default_swap_device') + if default_swap_device: + mappings['swap'] = default_swap_device ebs_devices = [] # 'ephemeralN', 'swap' and ebs diff --git a/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py b/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py new file mode 100644 index 000000000..63e7bc4f9 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py @@ -0,0 +1,48 @@ +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + +meta = MetaData() + +default_local_device = Column( + 'default_local_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +default_swap_device = Column( + 'default_swap_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances.create_column(default_local_device) + instances.create_column(default_swap_device) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta.bind = migrate_engine + instances.drop_column('default_swap_device') + instances.drop_column('default_local_device') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 211049112..b5f30a1e3 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -232,6 +232,8 @@ class Instance(BASE, NovaBase): uuid = Column(String(36)) root_device_name = Column(String(255)) + default_local_device = Column(String(255), nullable=True) + default_swap_device = Column(String(255), nullable=True) config_drive = Column(String(255)) # User editable field meant to represent what ip should be used diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 6b740d995..2a6f75d35 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -1111,6 +1111,11 @@ class LibvirtConnection(driver.ComputeDriver): nova_context.get_admin_context(), instance['id'], {'root_device_name': '/dev/' + self.default_root_device}) + if local_device: + db.instance_update( + nova_context.get_admin_context(), instance['id'], + {'default_local_device': '/dev/' + self.default_local_device}) + swap = driver.block_device_info_get_swap(block_device_info) if driver.swap_is_usable(swap): xml_info['swap_device'] = block_device.strip_dev( @@ -1119,6 +1124,9 @@ class LibvirtConnection(driver.ComputeDriver): not self._volume_in_mapping(self.default_swap_device, block_device_info)): xml_info['swap_device'] = self.default_swap_device + db.instance_update( + nova_context.get_admin_context(), instance['id'], + {'default_swap_device': '/dev/' + self.default_swap_device}) config_drive = False if instance.get('config_drive') or instance.get('config_drive_id'): -- cgit From 9482275a60ab8caa546ec402f61c60b9f5e7e33f Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sat, 10 Sep 2011 13:56:54 -0400 Subject: Update GlanceClient, GlanceImageService, and Glance Xen plugin to work with Glance keystone. 
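In outline, the change described above threads the request context into Glance client construction so the keystone token can be forwarded; a condensed sketch of the helper the diff below adds (the glance.client.Client class and its auth_tok/creds keyword arguments are taken from the patch itself, not independently verified against other Glance versions):

    from glance.client import Client as GlanceClient  # same class the patch loads via utils.import_class

    def _create_glance_client(context, host, port):
        """Build a Glance client, forwarding keystone credentials if present."""
        if context.strategy == 'keystone':
            # Forward the caller's token plus user/tenant so Glance can
            # authorize the request.
            creds = {'strategy': 'keystone',
                     'username': context.user_id,
                     'tenant': context.project_id}
            return GlanceClient(host, port,
                                auth_tok=context.auth_token, creds=creds)
        # 'noauth' deployments keep the previous unauthenticated client.
        return GlanceClient(host, port)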
--- nova/api/auth.py | 1 + nova/api/openstack/create_instance_helper.py | 3 +- nova/compute/api.py | 3 +- nova/compute/manager.py | 3 +- nova/context.py | 9 ++- nova/image/__init__.py | 56 +------------ nova/image/glance.py | 91 ++++++++++++++++------ nova/tests/api/openstack/fakes.py | 2 +- nova/tests/glance/stubs.py | 6 +- nova/tests/integrated/integrated_helpers.py | 2 +- nova/tests/test_xenapi.py | 3 +- nova/virt/images.py | 3 +- nova/virt/libvirt/connection.py | 4 +- nova/virt/vmwareapi/fake.py | 6 +- nova/virt/vmwareapi/vmops.py | 4 +- nova/virt/vmwareapi/vmware_images.py | 69 +++------------- nova/virt/xenapi/vm_utils.py | 17 ++-- nova/virt/xenapi/vmops.py | 4 +- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 6 +- 19 files changed, 124 insertions(+), 168 deletions(-) diff --git a/nova/api/auth.py b/nova/api/auth.py index cd0d38b3f..f73cae01e 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -70,6 +70,7 @@ class KeystoneContext(wsgi.Middleware): project_id, roles=roles, auth_token=auth_token, + strategy='keystone', remote_address=remote_address) req.environ['nova.context'] = ctx diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 67e669c17..e27ddf78b 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -92,7 +92,8 @@ class CreateInstanceHelper(object): if str(image_href).startswith(req.application_url): image_href = image_href.split('/').pop() try: - image_service, image_id = nova.image.get_image_service(image_href) + image_service, image_id = nova.image.get_image_service(context, + image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4e2944bb7..95b4f5dea 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -202,7 +202,8 @@ class API(base.Base): self._check_injected_file_quota(context, injected_files) self._check_requested_networks(context, requested_networks) - (image_service, image_id) = nova.image.get_image_service(image_href) + (image_service, image_id) = nova.image.get_image_service(context, + image_href) image = image_service.show(context, image_id) config_drive_id = None diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0477db745..25d44e502 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -322,7 +322,8 @@ class ComputeManager(manager.SchedulerDependentManager): # used by the image service. This should be refactored to be # consistent. 
image_href = instance['image_ref'] - image_service, image_id = nova.image.get_image_service(image_href) + image_service, image_id = nova.image.get_image_service(context, + image_href) image_meta = image_service.show(context, image_id) try: diff --git a/nova/context.py b/nova/context.py index 5c22641a0..de5b791c4 100644 --- a/nova/context.py +++ b/nova/context.py @@ -32,7 +32,7 @@ class RequestContext(object): def __init__(self, user_id, project_id, is_admin=None, read_deleted=False, roles=None, remote_address=None, timestamp=None, - request_id=None, auth_token=None): + request_id=None, auth_token=None, strategy='noauth'): self.user_id = user_id self.project_id = project_id self.roles = roles or [] @@ -50,6 +50,7 @@ class RequestContext(object): request_id = unicode(uuid.uuid4()) self.request_id = request_id self.auth_token = auth_token + self.strategy = strategy def to_dict(self): return {'user_id': self.user_id, @@ -60,7 +61,8 @@ class RequestContext(object): 'remote_address': self.remote_address, 'timestamp': utils.strtime(self.timestamp), 'request_id': self.request_id, - 'auth_token': self.auth_token} + 'auth_token': self.auth_token, + 'strategy': self.strategy} @classmethod def from_dict(cls, values): @@ -77,7 +79,8 @@ class RequestContext(object): remote_address=self.remote_address, timestamp=self.timestamp, request_id=self.request_id, - auth_token=self.auth_token) + auth_token=self.auth_token, + strategy=self.strategy) def get_admin_context(read_deleted=False): diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 5447c8a3a..307b73f01 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -16,70 +16,20 @@ # under the License. -from urlparse import urlparse - import nova -from nova import exception from nova import utils from nova import flags -from nova.image import glance as glance_image_service +from nova.image import glance FLAGS = flags.FLAGS -GlanceClient = utils.import_class('glance.client.Client') - - -def _parse_image_ref(image_href): - """Parse an image href into composite parts. - - :param image_href: href of an image - :returns: a tuple of the form (image_id, host, port) - :raises ValueError - - """ - o = urlparse(image_href) - port = o.port or 80 - host = o.netloc.split(':', 1)[0] - image_id = int(o.path.split('/')[-1]) - return (image_id, host, port) - - def get_default_image_service(): ImageService = utils.import_class(FLAGS.image_service) return ImageService() -# FIXME(sirp): perhaps this should be moved to nova/images/glance so that we -# keep Glance specific code together for the most part -def get_glance_client(image_href): - """Get the correct glance client and id for the given image_href. - - The image_href param can be an href of the form - http://myglanceserver:9292/images/42, or just an int such as 42. If the - image_href is an int, then flags are used to create the default - glance client. 
- - :param image_href: image ref/id for an image - :returns: a tuple of the form (glance_client, image_id) - - """ - image_href = image_href or 0 - if str(image_href).isdigit(): - glance_host, glance_port = \ - glance_image_service.pick_glance_api_server() - glance_client = GlanceClient(glance_host, glance_port) - return (glance_client, int(image_href)) - - try: - (image_id, host, port) = _parse_image_ref(image_href) - except ValueError: - raise exception.InvalidImageRef(image_href=image_href) - glance_client = GlanceClient(host, port) - return (glance_client, image_id) - - -def get_image_service(image_href): +def get_image_service(context, image_href): """Get the proper image_service and id for the given image_href. The image_href param can be an href of the form @@ -94,6 +44,6 @@ def get_image_service(image_href): if str(image_href).isdigit(): return (get_default_image_service(), int(image_href)) - (glance_client, image_id) = get_glance_client(image_href) + (glance_client, image_id) = glance.get_glance_client(context, image_href) image_service = nova.image.glance.GlanceImageService(glance_client) return (image_service, image_id) diff --git a/nova/image/glance.py b/nova/image/glance.py index 80abc7384..e735f4082 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -23,6 +23,7 @@ import copy import datetime import json import random +from urlparse import urlparse from glance.common import exception as glance_exception @@ -42,6 +43,35 @@ FLAGS = flags.FLAGS GlanceClient = utils.import_class('glance.client.Client') +def _parse_image_ref(image_href): + """Parse an image href into composite parts. + + :param image_href: href of an image + :returns: a tuple of the form (image_id, host, port) + :raises ValueError + + """ + o = urlparse(image_href) + port = o.port or 80 + host = o.netloc.split(':', 1)[0] + image_id = int(o.path.split('/')[-1]) + return (image_id, host, port) + + +def _create_glance_client(context, host, port): + if context.strategy == 'keystone': + # NOTE(dprince): Glance client just needs auth_tok right? Should we + # add username and tenant to the creds below? + creds={'strategy': 'keystone', + 'username': context.user_id, + 'tenant': context.project_id} + glance_client = GlanceClient(host, port, auth_tok=context.auth_token, + creds=creds) + else: + glance_client = GlanceClient(host, port) + return glance_client + + def pick_glance_api_server(): """Return which Glance API server to use for the request @@ -57,6 +87,33 @@ def pick_glance_api_server(): return host, port +def get_glance_client(context, image_href): + """Get the correct glance client and id for the given image_href. + + The image_href param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_href is an int, then flags are used to create the default + glance client. 
+ + :param image_href: image ref/id for an image + :returns: a tuple of the form (glance_client, image_id) + + """ + image_href = image_href or 0 + if str(image_href).isdigit(): + glance_host, glance_port = pick_glance_api_server() + glance_client = _create_glance_client(context, glance_host, + glance_port) + return (glance_client, int(image_href)) + + try: + (image_id, host, port) = _parse_image_ref(image_href) + except ValueError: + raise exception.InvalidImageRef(image_href=image_href) + glance_client = _create_glance_client(context, glance_host, glance_port) + return (glance_client, image_id) + + class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" @@ -71,23 +128,14 @@ class GlanceImageService(service.BaseImageService): def __init__(self, client=None): self._client = client - def _get_client(self): + def _get_client(self, context): # NOTE(sirp): we want to load balance each request across glance # servers. Since GlanceImageService is a long-lived object, `client` # is made to choose a new server each time via this property. if self._client is not None: return self._client glance_host, glance_port = pick_glance_api_server() - return GlanceClient(glance_host, glance_port) - - def _set_client(self, client): - self._client = client - - client = property(_get_client, _set_client) - - def _set_client_context(self, context): - """Sets the client's auth token.""" - self.client.set_auth_token(context.auth_token) + return _create_glance_client(context, glance_host, glance_port) def index(self, context, **kwargs): """Calls out to Glance for a list of images available.""" @@ -128,14 +176,14 @@ class GlanceImageService(service.BaseImageService): def _get_images(self, context, **kwargs): """Get image entitites from images service""" - self._set_client_context(context) # ensure filters is a dict kwargs['filters'] = kwargs.get('filters') or {} # NOTE(vish): don't filter out private images kwargs['filters'].setdefault('is_public', 'none') - return self._fetch_images(self.client.get_images_detailed, **kwargs) + client = self._get_client(context) + return self._fetch_images(client.get_images_detailed, **kwargs) def _fetch_images(self, fetch_func, **kwargs): """Paginate through results from glance server""" @@ -168,9 +216,8 @@ class GlanceImageService(service.BaseImageService): def show(self, context, image_id): """Returns a dict with image data for the given opaque image id.""" - self._set_client_context(context) try: - image_meta = self.client.get_image_meta(image_id) + image_meta = self._get_client(context).get_image_meta(image_id) except glance_exception.NotFound: raise exception.ImageNotFound(image_id=image_id) @@ -192,9 +239,9 @@ class GlanceImageService(service.BaseImageService): def get(self, context, image_id, data): """Calls out to Glance for metadata and data and writes data.""" - self._set_client_context(context) try: - image_meta, image_chunks = self.client.get_image(image_id) + client = self._get_client(context) + image_meta, image_chunks = client.get_image(image_id) except glance_exception.NotFound: raise exception.ImageNotFound(image_id=image_id) @@ -210,7 +257,6 @@ class GlanceImageService(service.BaseImageService): :raises: AlreadyExists if the image already exist. """ - self._set_client_context(context) # Translate Base -> Service LOG.debug(_('Creating image in Glance. 
Metadata passed in %s'), image_meta) @@ -218,7 +264,7 @@ class GlanceImageService(service.BaseImageService): LOG.debug(_('Metadata after formatting for Glance %s'), sent_service_image_meta) - recv_service_image_meta = self.client.add_image( + recv_service_image_meta = self._get_client(context).add_image( sent_service_image_meta, data) # Translate Service -> Base @@ -233,12 +279,12 @@ class GlanceImageService(service.BaseImageService): :raises: ImageNotFound if the image does not exist. """ - self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) image_meta = _convert_to_string(image_meta) try: - image_meta = self.client.update_image(image_id, image_meta, data) + client = self._get_client(context) + image_meta = client.update_image(image_id, image_meta, data) except glance_exception.NotFound: raise exception.ImageNotFound(image_id=image_id) @@ -251,11 +297,10 @@ class GlanceImageService(service.BaseImageService): :raises: ImageNotFound if the image does not exist. """ - self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) try: - result = self.client.delete_image(image_id) + result = self._get_client(context).delete_image(image_id) except glance_exception.NotFound: raise exception.ImageNotFound(image_id=image_id) return result diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 44681d395..098b1e284 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -124,7 +124,7 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def stub_out_image_service(stubs): - def fake_get_image_service(image_href): + def fake_get_image_service(context, image_href): return (nova.image.fake.FakeImageService(), image_href) stubs.Set(nova.image, 'get_image_service', fake_get_image_service) stubs.Set(nova.image, 'get_default_image_service', diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index f2a19f22d..6b74e671c 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -16,14 +16,14 @@ import StringIO -import nova.image +from nova.image import glance def stubout_glance_client(stubs): - def fake_get_glance_client(image_href): + def fake_get_glance_client(context, image_href): image_id = int(str(image_href).split('/')[-1]) return (FakeGlance('foo'), image_id) - stubs.Set(nova.image, 'get_glance_client', fake_get_glance_client) + stubs.Set(glance, 'get_glance_client', fake_get_glance_client) class FakeGlance(object): diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 343190427..49de9c854 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -64,7 +64,7 @@ class _IntegratedTestBase(test.TestCase): self.flags(**f) self.flags(verbose=True) - def fake_get_image_service(image_href): + def fake_get_image_service(context, image_href): image_id = int(str(image_href).split('/')[-1]) return (nova.image.fake.FakeImageService(), image_id) self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 91b4161b0..4a83d139e 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -932,8 +932,9 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): self.fake_instance.architecture = 'x86-64' def assert_disk_type(self, disk_type): + ctx = context.RequestContext('fake', 'fake') dt = 
vm_utils.VMHelper.determine_disk_image_type( - self.fake_instance) + self.fake_instance, ctx) self.assertEqual(disk_type, dt) def test_instance_disk(self): diff --git a/nova/virt/images.py b/nova/virt/images.py index 54c691a40..810b359d9 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -37,7 +37,8 @@ def fetch(context, image_href, path, _user_id, _project_id): # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. - (image_service, image_id) = nova.image.get_image_service(image_href) + (image_service, image_id) = nova.image.get_image_service(context, + image_href) with open(path, "wb") as image_file: metadata = image_service.get(context, image_id, image_file) return metadata diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 19cef5ad7..fd902ca4b 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -398,10 +398,10 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom = self._lookup_by_name(instance['name']) (image_service, image_id) = nova.image.get_image_service( - instance['image_ref']) + context, instance['image_ref']) base = image_service.show(context, image_id) (snapshot_image_service, snapshot_image_id) = \ - nova.image.get_image_service(image_href) + nova.image.get_image_service(context, image_href) snapshot = snapshot_image_service.show(context, snapshot_image_id) metadata = {'is_public': False, diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index 4c62d18bb..0dea13aba 100644 --- a/nova/virt/vmwareapi/fake.py +++ b/nova/virt/vmwareapi/fake.py @@ -412,7 +412,7 @@ def fake_get_network(*args, **kwargs): return [{'type': 'fake'}] -def fake_fetch_image(image, instance, **kwargs): +def fake_fetch_image(context, image, instance, **kwargs): """Fakes fetch image call. Just adds a reference to the db for the file.""" ds_name = kwargs.get("datastore_name") file_path = kwargs.get("file_path") @@ -420,12 +420,12 @@ def fake_fetch_image(image, instance, **kwargs): _add_file(ds_file_path) -def fake_upload_image(image, instance, **kwargs): +def fake_upload_image(context, image, instance, **kwargs): """Fakes the upload of an image.""" pass -def fake_get_vmdk_size_and_properties(image_id, instance): +def fake_get_vmdk_size_and_properties(context, image_id, instance): """Fakes the file size and properties fetch for the image file.""" props = {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic"} diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 07a6ba6ab..6bdc2f23a 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -157,7 +157,7 @@ class VMWareVMOps(object): repository. """ image_size, image_properties = \ - vmware_images.get_vmdk_size_and_properties( + vmware_images.get_vmdk_size_and_properties(context, instance.image_ref, instance) vmdk_file_size_in_kb = int(image_size) / 1024 os_type = image_properties.get("vmware_ostype", "otherGuest") @@ -282,6 +282,7 @@ class VMWareVMOps(object): # Upload the -flat.vmdk file whose meta-data file we just created # above vmware_images.fetch_image( + context, instance.image_ref, instance, host=self._session._host_ip, @@ -448,6 +449,7 @@ class VMWareVMOps(object): # Upload the contents of -flat.vmdk file which has the disk data. 
LOG.debug(_("Uploading image %s") % snapshot_name) vmware_images.upload_image( + context, snapshot_name, instance, os_type=os_type, diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index f5f75dae2..53f2d372e 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -20,15 +20,13 @@ Utility functions for Image transfer. from nova import exception from nova import flags -import nova.image +from nova.image import glance from nova import log as logging from nova.virt.vmwareapi import io_util from nova.virt.vmwareapi import read_write_util LOG = logging.getLogger("nova.virt.vmwareapi.vmware_images") -FLAGS = flags.FLAGS - QUEUE_BUFFER_SIZE = 10 @@ -87,36 +85,10 @@ def start_transfer(read_file_handle, data_size, write_file_handle=None, write_file_handle.close() -def fetch_image(image, instance, **kwargs): - """Fetch an image for attaching to the newly created VM.""" - # Depending upon the image service, make appropriate image service call - if FLAGS.image_service == "nova.image.glance.GlanceImageService": - func = _get_glance_image - elif FLAGS.image_service == "nova.image.s3.S3ImageService": - func = _get_s3_image - else: - raise NotImplementedError(_("The Image Service %s is not implemented") - % FLAGS.image_service) - return func(image, instance, **kwargs) - - -def upload_image(image, instance, **kwargs): - """Upload the newly snapshotted VM disk file.""" - # Depending upon the image service, make appropriate image service call - if FLAGS.image_service == "nova.image.glance.GlanceImageService": - func = _put_glance_image - elif FLAGS.image_service == "nova.image.s3.S3ImageService": - func = _put_s3_image - else: - raise NotImplementedError(_("The Image Service %s is not implemented") - % FLAGS.image_service) - return func(image, instance, **kwargs) - - -def _get_glance_image(image, instance, **kwargs): +def fetch_image(context, image, instance, **kwargs): """Download image from the glance image server.""" LOG.debug(_("Downloading image %s from glance image server") % image) - (glance_client, image_id) = nova.image.get_glance_client(image) + (glance_client, image_id) = glance.get_glance_client(context, image) metadata, read_iter = glance_client.get_image(image_id) read_file_handle = read_write_util.GlanceFileRead(read_iter) file_size = int(metadata['size']) @@ -132,17 +104,7 @@ def _get_glance_image(image, instance, **kwargs): LOG.debug(_("Downloaded image %s from glance image server") % image) -def _get_s3_image(image, instance, **kwargs): - """Download image from the S3 image server.""" - raise NotImplementedError - - -def _get_local_image(image, instance, **kwargs): - """Download image from the local nova compute node.""" - raise NotImplementedError - - -def _put_glance_image(image, instance, **kwargs): +def upload_image(context, image, instance, **kwargs): """Upload the snapshotted vm disk file to Glance image server.""" LOG.debug(_("Uploading image %s to the Glance image server") % image) read_file_handle = read_write_util.VmWareHTTPReadFile( @@ -152,7 +114,7 @@ def _put_glance_image(image, instance, **kwargs): kwargs.get("cookies"), kwargs.get("file_path")) file_size = read_file_handle.get_size() - (glance_client, image_id) = nova.image.get_glance_client(image) + (glance_client, image_id) = glance.get_glance_client(context, image) # The properties and other fields that we need to set for the image. 
image_metadata = {"is_public": True, "disk_format": "vmdk", @@ -168,17 +130,7 @@ def _put_glance_image(image, instance, **kwargs): LOG.debug(_("Uploaded image %s to the Glance image server") % image) -def _put_local_image(image, instance, **kwargs): - """Upload the snapshotted vm disk file to the local nova compute node.""" - raise NotImplementedError - - -def _put_s3_image(image, instance, **kwargs): - """Upload the snapshotted vm disk file to S3 image server.""" - raise NotImplementedError - - -def get_vmdk_size_and_properties(image, instance): +def get_vmdk_size_and_properties(context, image, instance): """ Get size of the vmdk file that is to be downloaded for attach in spawn. Need this to create the dummy virtual disk for the meta-data file. The @@ -186,12 +138,9 @@ def get_vmdk_size_and_properties(image, instance): """ LOG.debug(_("Getting image size for the image %s") % image) - if FLAGS.image_service == "nova.image.glance.GlanceImageService": - (glance_client, image_id) = nova.image.get_glance_client(image) - meta_data = glance_client.get_image_meta(image_id) - size, properties = meta_data["size"], meta_data["properties"] - elif FLAGS.image_service == "nova.image.s3.S3ImageService": - raise NotImplementedError + (glance_client, image_id) = glance.get_glance_client(context, image) + meta_data = glance_client.get_image_meta(image_id) + size, properties = meta_data["size"], meta_data["properties"] LOG.debug(_("Got image size of %(size)s for the image %(image)s") % locals()) return size, properties diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index efbea7076..302238c98 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -31,12 +31,10 @@ import urllib import uuid from xml.dom import minidom -import glance.client from nova import db from nova import exception from nova import flags -import nova.image -from nova.image import glance as glance_image_service +from nova.image import glance from nova import log as logging from nova import utils from nova.compute import instance_types @@ -383,8 +381,7 @@ class VMHelper(HelperBase): os_type = instance.os_type or FLAGS.default_os_type - glance_host, glance_port = \ - glance_image_service.pick_glance_api_server() + glance_host, glance_port = glance.pick_glance_api_server() params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, 'glance_host': glance_host, @@ -447,8 +444,7 @@ class VMHelper(HelperBase): # pass them as arguments uuid_stack = [str(uuid.uuid4()) for i in xrange(2)] - glance_host, glance_port = \ - glance_image_service.pick_glance_api_server() + glance_host, glance_port = glance.pick_glance_api_server() params = {'image_id': image, 'glance_host': glance_host, 'glance_port': glance_port, @@ -546,7 +542,7 @@ class VMHelper(HelperBase): else: sr_ref = safe_find_sr(session) - glance_client, image_id = nova.image.get_glance_client(image) + glance_client, image_id = glance.get_glance_client(context, image) glance_client.set_auth_token(getattr(context, 'auth_token', None)) meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) @@ -606,7 +602,7 @@ class VMHelper(HelperBase): raise e @classmethod - def determine_disk_image_type(cls, instance): + def determine_disk_image_type(cls, instance, context): """Disk Image Types are used to determine where the kernel will reside within an image. 
To figure out which type we're dealing with, we use the following rules: @@ -639,7 +635,8 @@ class VMHelper(HelperBase): 'vhd': ImageType.DISK_VHD, 'iso': ImageType.DISK_ISO} image_ref = instance.image_ref - glance_client, image_id = nova.image.get_glance_client(image_ref) + glance_client, image_id = glance.get_glance_client(context, + image_ref) meta = glance_client.get_image_meta(image_id) disk_format = meta['disk_format'] try: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 9c138ee41..038c041c7 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -135,7 +135,7 @@ class VMOps(object): self._session.call_xenapi('VM.start', vm_ref, False, False) def _create_disks(self, context, instance): - disk_image_type = VMHelper.determine_disk_image_type(instance) + disk_image_type = VMHelper.determine_disk_image_type(instance, context) vdis = VMHelper.fetch_image(context, self._session, instance, instance.image_ref, instance.user_id, instance.project_id, @@ -176,7 +176,7 @@ class VMOps(object): power_state.SHUTDOWN) return - disk_image_type = VMHelper.determine_disk_image_type(instance) + disk_image_type = VMHelper.determine_disk_image_type(instance, context) kernel = None ramdisk = None try: diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index a06312890..1a9ac37e9 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -252,7 +252,11 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type, # NOTE(dprince): We need to resend any existing Glance meta/property # headers so they are preserved in Glance. We obtain them here with a # HEAD request. - conn.request('HEAD', '/v1/images/%s' % image_id) + conn.putrequest('HEAD', '/v1/images/%s' % image_id) + if auth_token: + conn.putheader('x-auth-token', auth_token) + conn.endheaders() + resp = conn.getresponse() if resp.status != httplib.OK: raise Exception("Unexpected response from Glance %i" % resp.status) -- cgit From 7ac94ccf7dbb5838ef877b9d954ea96bf1412b4b Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 12 Sep 2011 00:16:49 -0700 Subject: fix for lp847604 to unbreak instance rebooting --- nova/compute/api.py | 2 +- nova/virt/libvirt/connection.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index b0ea044c5..48bb266b7 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1049,7 +1049,7 @@ class API(base.Base): vm_state=vm_states.ACTIVE, task_state=task_states.REBOOTING) self._cast_compute_message('reboot_instance', context, instance_id, - reboot_type) + params={'reboot_type': reboot_type}) @scheduler_api.reroute_compute("rebuild") def rebuild(self, context, instance_id, image_href, admin_password, diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 19cef5ad7..fb2aed651 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -466,7 +466,7 @@ class LibvirtConnection(driver.ComputeDriver): shutil.rmtree(temp_dir) @exception.wrap_exception() - def reboot(self, instance, network_info): + def reboot(self, instance, network_info, reboot_type): """Reboot a virtual machine, given an instance reference. 
This method actually destroys and re-creates the domain to ensure the -- cgit From cd5084f8a69b0e2a14f01aa9a4f3d8588a83c923 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Mon, 12 Sep 2011 14:30:56 +0200 Subject: Fix rogue usage of 'sudo' bypassing the run_as_root=True method --- nova/tests/test_libvirt.py | 4 ++-- nova/virt/disk.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 8c6775b29..fea2b7cd3 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -743,7 +743,7 @@ class LibvirtConnTestCase(test.TestCase): # qemu-img should be mockd since test environment might not have # large disk space. self.mox.StubOutWithMock(utils, "execute") - utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw', + utils.execute('qemu-img', 'create', '-f', 'raw', '%s/%s/disk' % (tmpdir, instance_ref.name), '10G') self.mox.ReplayAll() @@ -795,7 +795,7 @@ class LibvirtConnTestCase(test.TestCase): os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024) # another is qcow image, so qemu-img should be mocked. self.mox.StubOutWithMock(utils, "execute") - utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\ + utils.execute('qemu-img', 'info', '/test/disk.local').\ AndReturn((ret, '')) self.mox.ReplayAll() diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 52b2881e8..e6cf5f5c4 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -228,8 +228,8 @@ def _inject_metadata_into_fs(metadata, fs, execute=None): metadata_path = os.path.join(fs, "meta.js") metadata = dict([(m.key, m.value) for m in metadata]) - utils.execute('sudo', 'tee', metadata_path, - process_input=json.dumps(metadata)) + utils.execute('tee', metadata_path, + process_input=json.dumps(metadata), run_as_root=True) def _inject_key_into_fs(key, fs, execute=None): -- cgit From f40955d419c886be29213f73f5ffdf2f38e00057 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 12 Sep 2011 08:00:30 -0700 Subject: add test for method sig --- nova/tests/test_libvirt.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 93967ceec..d776a386b 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -16,6 +16,7 @@ import copy import eventlet +import inspect import mox import os import re @@ -35,6 +36,7 @@ from nova import utils from nova.api.ec2 import cloud from nova.compute import power_state from nova.compute import vm_states +from nova.virt import driver from nova.virt.libvirt import connection from nova.virt.libvirt import firewall from nova.tests import fake_network @@ -840,6 +842,50 @@ class LibvirtConnTestCase(test.TestCase): _assert_volume_in_mapping('sdg', False) _assert_volume_in_mapping('sdh1', False) + def test_reboot_signature(self): + """Test that libvirt driver method sig matches interface""" + def fake_reboot_with_correct_sig(ignore, instance, + network_info, reboot_type): + pass + + def fake_destroy(instance, network_info, cleanup=False): + pass + + def fake_plug_vifs(instance, network_info): + pass + + def fake_create_new_domain(xml): + return + + def fake_none(self, instance): + return + + instance = db.instance_create(self.context, self.test_instance) + network_info = _fake_network_info(self.stubs, 1) + + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = self.fake_lookup + + conn = connection.LibvirtConnection(False) + 
self.stubs.Set(conn, 'destroy', fake_destroy) + self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs) + self.stubs.Set(conn.firewall_driver, + 'setup_basic_filtering', + fake_none) + self.stubs.Set(conn.firewall_driver, + 'prepare_instance_filter', + fake_none) + self.stubs.Set(conn, '_create_new_domain', fake_create_new_domain) + self.stubs.Set(conn.firewall_driver, + 'apply_instance_filter', + fake_none) + + args = [instance, network_info, 'SOFT'] + conn.reboot(*args) + + compute_driver = driver.ComputeDriver() + self.assertRaises(NotImplementedError, compute_driver.reboot, *args) + class NWFilterFakes: def __init__(self): -- cgit From 4cbfc60b225d0386b6719e49fc9797fd72dc219b Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 12 Sep 2011 09:15:31 -0700 Subject: remove unused dep --- nova/tests/test_libvirt.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index d776a386b..5346e089b 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -16,7 +16,6 @@ import copy import eventlet -import inspect import mox import os import re -- cgit From 9b8e73d9ef1a5bd4efb460f3a0c033fc748ccdd9 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Mon, 12 Sep 2011 14:22:46 -0400 Subject: adding tests for deleted and pending_delete statuses --- nova/tests/api/openstack/test_images.py | 96 ++++++++++++++++++++++++++++++--- 1 file changed, 89 insertions(+), 7 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 46f763d5e..c63d1203a 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -365,7 +365,9 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): {'id': 125, 'name': 'saving snapshot'}, {'id': 126, 'name': 'active snapshot'}, {'id': 127, 'name': 'killed snapshot'}, - {'id': 129, 'name': None}] + {'id': 128, 'name': 'deleted snapshot'}, + {'id': 129, 'name': 'pending_delete snapshot'}, + {'id': 131, 'name': None}] self.assertDictListMatch(response_list, expected) @@ -458,7 +460,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image.toxml(), actual_image.toxml()) def test_get_image_xml_no_name(self): - request = webob.Request.blank('/v1.0/images/129') + request = webob.Request.blank('/v1.0/images/131') request.accept = "application/xml" response = request.get_response(fakes.wsgi_app()) @@ -466,7 +468,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): expected_now = self.NOW_API_FORMAT expected_image = minidom.parseString(""" - Date: Mon, 12 Sep 2011 15:17:57 -0400 Subject: pep8 fix. --- nova/image/glance.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index e735f4082..13c8ff843 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -62,9 +62,9 @@ def _create_glance_client(context, host, port): if context.strategy == 'keystone': # NOTE(dprince): Glance client just needs auth_tok right? Should we # add username and tenant to the creds below? 
- creds={'strategy': 'keystone', - 'username': context.user_id, - 'tenant': context.project_id} + creds = {'strategy': 'keystone', + 'username': context.user_id, + 'tenant': context.project_id} glance_client = GlanceClient(host, port, auth_tok=context.auth_token, creds=creds) else: -- cgit From 81fe8c89061fa15ebcea9d20f39cf79b63cf8522 Mon Sep 17 00:00:00 2001 From: Antony Messerli Date: Mon, 12 Sep 2011 14:43:15 -0500 Subject: pep8 fixes --- nova/db/sqlalchemy/api.py | 2 +- nova/virt/disk.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 40e2ca167..e5a661c7f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -672,7 +672,7 @@ def floating_ip_update(context, address, values): def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Keyword arguments: - reserved -- should be a boolean value(True or False), exact value will be + reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ session = get_session() diff --git a/nova/virt/disk.py b/nova/virt/disk.py index d0745c82d..cd3422829 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -160,6 +160,7 @@ def destroy_container(target, instance, nbd=False): except Exception, exn: LOG.exception(_('Failed to remove container: %s'), exn) + def _link_device(image, nbd): """Link image to device using loopback or nbd""" -- cgit From 050be203cb43a12ca430eadfd30c87690b33b9cf Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Mon, 12 Sep 2011 20:10:57 +0000 Subject: Add support for vendor content types --- nova/api/openstack/versions.py | 7 +++++++ nova/api/openstack/wsgi.py | 20 +++++++++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index e2f892fb6..04d9915ca 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -100,13 +100,17 @@ class Versions(wsgi.Resource): body_serializers = { 'application/atom+xml': VersionsAtomSerializer(metadata=metadata), 'application/xml': VersionsXMLSerializer(metadata=metadata), + 'application/vnd.openstack.compute+xml': + VersionsXMLSerializer(metadata=metadata), } serializer = wsgi.ResponseSerializer( body_serializers=body_serializers, headers_serializer=headers_serializer) supported_content_types = ('application/json', + 'application/vnd.openstack.compute+json', 'application/xml', + 'application/vnd.openstack.compute+xml', 'application/atom+xml') deserializer = VersionsRequestDeserializer( supported_content_types=supported_content_types) @@ -383,12 +387,15 @@ def create_resource(version='1.0'): body_serializers = { 'application/xml': VersionsXMLSerializer(), + 'application/vnd.openstack.compute+xml': VersionsXMLSerializer(), 'application/atom+xml': VersionsAtomSerializer(), } serializer = wsgi.ResponseSerializer(body_serializers) supported_content_types = ('application/json', + 'application/vnd.openstack.compute+json', 'application/xml', + 'application/vnd.openstack.compute+xml', 'application/atom+xml') deserializer = wsgi.RequestDeserializer( supported_content_types=supported_content_types) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 8641e960a..ee6b87403 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -28,8 +28,12 @@ class Request(webob.Request): Based on the query extension then the Accept header. 
""" + LOG.info('supported = %s' % repr(supported_content_types)) supported_content_types = supported_content_types or \ - ('application/json', 'application/xml') + ('application/json', + 'application/vnd.openstack.compute+json', + 'application/xml', + 'application/vnd.openstack.compute+xml') parts = self.path.rsplit('.', 1) if len(parts) > 1: @@ -51,7 +55,10 @@ class Request(webob.Request): if not "Content-Type" in self.headers: return None - allowed_types = ("application/xml", "application/json") + allowed_types = ('application/json', + 'application/vnd.openstack.compute+json', + 'application/xml', + 'application/vnd.openstack.compute+xml') content_type = self.content_type if content_type not in allowed_types: @@ -191,11 +198,16 @@ class RequestDeserializer(object): supported_content_types=None): self.supported_content_types = supported_content_types or \ - ('application/json', 'application/xml') + ('application/json', + 'application/vnd.openstack.compute+json', + 'application/xml', + 'application/vnd.openstack.compute+xml') self.body_deserializers = { 'application/xml': XMLDeserializer(), + 'application/vnd.openstack.compute+xml': XMLDeserializer(), 'application/json': JSONDeserializer(), + 'application/vnd.openstack.compute+json': JSONDeserializer(), } self.body_deserializers.update(body_deserializers or {}) @@ -409,7 +421,9 @@ class ResponseSerializer(object): def __init__(self, body_serializers=None, headers_serializer=None): self.body_serializers = { 'application/xml': XMLDictSerializer(), + 'application/vnd.openstack.compute+xml': XMLDictSerializer(), 'application/json': JSONDictSerializer(), + 'application/vnd.openstack.compute+json': JSONDictSerializer(), } self.body_serializers.update(body_serializers or {}) -- cgit From 17afc6e53f5d53ea80fd747d6ce059c11980c817 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Mon, 12 Sep 2011 18:17:32 -0400 Subject: relocating ec2 tests --- nova/tests/api/ec2/public_key/dummy.fingerprint | 1 + nova/tests/api/ec2/public_key/dummy.pub | 1 + nova/tests/api/ec2/test_cloud.py | 1623 +++++++++++++++++++++++ nova/tests/public_key/dummy.fingerprint | 1 - nova/tests/public_key/dummy.pub | 1 - nova/tests/test_cloud.py | 1623 ----------------------- 6 files changed, 1625 insertions(+), 1625 deletions(-) create mode 100644 nova/tests/api/ec2/public_key/dummy.fingerprint create mode 100644 nova/tests/api/ec2/public_key/dummy.pub create mode 100644 nova/tests/api/ec2/test_cloud.py delete mode 100644 nova/tests/public_key/dummy.fingerprint delete mode 100644 nova/tests/public_key/dummy.pub delete mode 100644 nova/tests/test_cloud.py diff --git a/nova/tests/api/ec2/public_key/dummy.fingerprint b/nova/tests/api/ec2/public_key/dummy.fingerprint new file mode 100644 index 000000000..715bca27a --- /dev/null +++ b/nova/tests/api/ec2/public_key/dummy.fingerprint @@ -0,0 +1 @@ +1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df diff --git a/nova/tests/api/ec2/public_key/dummy.pub b/nova/tests/api/ec2/public_key/dummy.pub new file mode 100644 index 000000000..d4cf2bc0d --- /dev/null +++ b/nova/tests/api/ec2/public_key/dummy.pub @@ -0,0 +1 @@ +ssh-dss 
AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py new file mode 100644 index 000000000..7fe353b3d --- /dev/null +++ b/nova/tests/api/ec2/test_cloud.py @@ -0,0 +1,1623 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import mox + +import functools + +from base64 import b64decode +from M2Crypto import BIO +from M2Crypto import RSA +import os + +from eventlet import greenthread + +from nova import context +from nova import crypto +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import network +from nova import rpc +from nova import test +from nova import utils +from nova.api.ec2 import cloud +from nova.api.ec2 import ec2utils +from nova.compute import vm_states +from nova.image import fake + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.cloud') + + +class CloudTestCase(test.TestCase): + def setUp(self): + super(CloudTestCase, self).setUp() + self.flags(connection_type='fake', + stub_network=True) + + # set up our cloud + self.cloud = cloud.CloudController() + + # set up services + self.compute = self.start_service('compute') + self.scheduter = self.start_service('scheduler') + self.network = self.start_service('network') + self.volume = self.start_service('volume') + self.image_service = utils.import_object(FLAGS.image_service) + + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, + self.project_id, + True) + + def fake_show(meh, context, id): + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine', 'image_state': 'available'}} + + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) + + # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish + rpc_cast = rpc.cast + + def finish_cast(*args, **kwargs): + rpc_cast(*args, **kwargs) + greenthread.sleep(0.2) + + self.stubs.Set(rpc, 'cast', finish_cast) + + def _create_key(self, name): + # NOTE(vish): create depends on pool, so just call helper directly + return cloud._gen_key(self.context, self.context.user_id, name) + + def test_describe_regions(self): + 
"""Makes sure describe regions runs without raising an exception""" + result = self.cloud.describe_regions(self.context) + self.assertEqual(len(result['regionInfo']), 1) + self.flags(region_list=["one=test_host1", "two=test_host2"]) + result = self.cloud.describe_regions(self.context) + self.assertEqual(len(result['regionInfo']), 2) + + def test_describe_addresses(self): + """Makes sure describe addresses runs without raising an exception""" + address = "10.10.10.10" + db.floating_ip_create(self.context, + {'address': address, + 'host': self.network.host}) + self.cloud.allocate_address(self.context) + self.cloud.describe_addresses(self.context) + self.cloud.release_address(self.context, + public_ip=address) + db.floating_ip_destroy(self.context, address) + + def test_allocate_address(self): + address = "10.10.10.10" + allocate = self.cloud.allocate_address + db.floating_ip_create(self.context, + {'address': address, + 'host': self.network.host}) + self.assertEqual(allocate(self.context)['publicIp'], address) + db.floating_ip_destroy(self.context, address) + self.assertRaises(exception.NoMoreFloatingIps, + allocate, + self.context) + + def test_release_address(self): + address = "10.10.10.10" + allocate = self.cloud.allocate_address + db.floating_ip_create(self.context, + {'address': address, + 'host': self.network.host}) + result = self.cloud.release_address(self.context, address) + self.assertEqual(result['releaseResponse'], ['Address released.']) + + def test_release_address_still_associated(self): + address = "10.10.10.10" + fixed_ip = {'instance': {'id': 1}} + floating_ip = {'id': 0, + 'address': address, + 'fixed_ip_id': 0, + 'fixed_ip': fixed_ip, + 'project_id': None, + 'auto_assigned': False} + network_api = network.api.API() + self.mox.StubOutWithMock(network_api.db, 'floating_ip_get_by_address') + network_api.db.floating_ip_get_by_address(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(floating_ip) + self.mox.ReplayAll() + release = self.cloud.release_address + # ApiError: Floating ip is in use. Disassociate it before releasing. 
+ self.assertRaises(exception.ApiError, release, self.context, address) + + def test_associate_disassociate_address(self): + """Verifies associate runs cleanly without raising an exception""" + address = "10.10.10.10" + db.floating_ip_create(self.context, {'address': address}) + self.cloud.allocate_address(self.context) + # TODO(jkoelker) Probably need to query for instance_type_id and + # make sure we get a valid one + inst = db.instance_create(self.context, {'host': self.compute.host, + 'instance_type_id': 1}) + networks = db.network_get_all(self.context) + for network in networks: + db.network_update(self.context, network['id'], + {'host': self.network.host}) + project_id = self.context.project_id + type_id = inst['instance_type_id'] + ips = self.network.allocate_for_instance(self.context, + instance_id=inst['id'], + host=inst['host'], + vpn=None, + instance_type_id=type_id, + project_id=project_id) + # TODO(jkoelker) Make this mas bueno + self.assertTrue(ips) + self.assertTrue('ips' in ips[0][1]) + self.assertTrue(ips[0][1]['ips']) + self.assertTrue('ip' in ips[0][1]['ips'][0]) + + fixed = ips[0][1]['ips'][0]['ip'] + + ec2_id = ec2utils.id_to_ec2_id(inst['id']) + self.cloud.associate_address(self.context, + instance_id=ec2_id, + public_ip=address) + self.cloud.disassociate_address(self.context, + public_ip=address) + self.cloud.release_address(self.context, + public_ip=address) + self.network.deallocate_fixed_ip(self.context, fixed) + db.instance_destroy(self.context, inst['id']) + db.floating_ip_destroy(self.context, address) + + def test_describe_security_groups(self): + """Makes sure describe_security_groups works and filters results.""" + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + result = self.cloud.describe_security_groups(self.context) + # NOTE(vish): should have the default group as well + self.assertEqual(len(result['securityGroupInfo']), 2) + result = self.cloud.describe_security_groups(self.context, + group_name=[sec['name']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + sec['name']) + db.security_group_destroy(self.context, sec['id']) + + def test_describe_security_groups_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + result = self.cloud.describe_security_groups(self.context, + group_id=[sec['id']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + sec['name']) + default = db.security_group_get_by_name(self.context, + self.context.project_id, + 'default') + result = self.cloud.describe_security_groups(self.context, + group_id=[default['id']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + 'default') + db.security_group_destroy(self.context, sec['id']) + + def test_create_delete_security_group(self): + descript = 'test description' + create = self.cloud.create_security_group + result = create(self.context, 'testgrp', descript) + group_descript = result['securityGroupSet'][0]['groupDescription'] + self.assertEqual(descript, group_descript) + delete = self.cloud.delete_security_group + self.assertTrue(delete(self.context, 'testgrp')) + + def test_delete_security_group_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + delete = 
self.cloud.delete_security_group + self.assertTrue(delete(self.context, group_id=sec['id'])) + + def test_delete_security_group_with_bad_name(self): + delete = self.cloud.delete_security_group + notfound = exception.SecurityGroupNotFound + self.assertRaises(notfound, delete, self.context, 'badname') + + def test_delete_security_group_with_bad_group_id(self): + delete = self.cloud.delete_security_group + notfound = exception.SecurityGroupNotFound + self.assertRaises(notfound, delete, self.context, group_id=999) + + def test_delete_security_group_no_params(self): + delete = self.cloud.delete_security_group + self.assertRaises(exception.ApiError, delete, self.context) + + def test_authorize_security_group_ingress(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, + 'ip_ranges': + {'1': {'cidr_ip': u'0.0.0.0/0'}, + '2': {'cidr_ip': u'10.10.10.10/32'}}, + 'ip_protocol': u'tcp'}]} + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_security_group_fail_missing_source_group(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, + 'ip_ranges':{'1': {'cidr_ip': u'0.0.0.0/0'}, + '2': {'cidr_ip': u'10.10.10.10/32'}}, + 'groups': {'1': {'user_id': u'someuser', + 'group_name': u'somegroup1'}}, + 'ip_protocol': u'tcp'}]} + self.assertRaises(exception.SecurityGroupNotFound, authz, + self.context, group_name=sec['name'], **kwargs) + + def test_authorize_security_group_ingress_ip_permissions_groups(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, + {'project_id': 'someuser', + 'name': 'somegroup1'}) + sec = db.security_group_create(self.context, + {'project_id': 'someuser', + 'name': 'othergroup2'}) + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, + 'groups': {'1': {'user_id': u'someuser', + 'group_name': u'somegroup1'}, + '2': {'user_id': u'someuser', + 'group_name': u'othergroup2'}}, + 'ip_protocol': u'tcp'}]} + self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + + def test_revoke_security_group_ingress(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_id=sec['id'], **kwargs) + revoke = self.cloud.revoke_security_group_ingress + self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_revoke_security_group_ingress_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': 
self.context.project_id, + 'name': 'test'}) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_id=sec['id'], **kwargs) + revoke = self.cloud.revoke_security_group_ingress + self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs)) + + def test_authorize_security_group_ingress_missing_protocol_params(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + authz = self.cloud.authorize_security_group_ingress + self.assertRaises(exception.ApiError, authz, self.context, 'test') + + def test_authorize_security_group_ingress_missing_group_name_or_id(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + authz = self.cloud.authorize_security_group_ingress + self.assertRaises(exception.ApiError, authz, self.context, **kwargs) + + def test_authorize_security_group_ingress_already_exists(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_name=sec['name'], **kwargs) + self.assertRaises(exception.ApiError, authz, self.context, + group_name=sec['name'], **kwargs) + + def test_revoke_security_group_ingress_missing_group_name_or_id(self): + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + revoke = self.cloud.revoke_security_group_ingress + self.assertRaises(exception.ApiError, revoke, self.context, **kwargs) + + def test_describe_volumes(self): + """Makes sure describe_volumes works and filters results.""" + vol1 = db.volume_create(self.context, {}) + vol2 = db.volume_create(self.context, {}) + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + volume_id = ec2utils.id_to_ec2_vol_id(vol2['id']) + result = self.cloud.describe_volumes(self.context, + volume_id=[volume_id]) + self.assertEqual(len(result['volumeSet']), 1) + self.assertEqual( + ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']), + vol2['id']) + db.volume_destroy(self.context, vol1['id']) + db.volume_destroy(self.context, vol2['id']) + + def test_create_volume_from_snapshot(self): + """Makes sure create_volume works when we specify a snapshot.""" + vol = db.volume_create(self.context, {'size': 1}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'volume_size': vol['size'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id']) + + result = self.cloud.create_volume(self.context, + snapshot_id=snapshot_id) + volume_id = result['volumeId'] + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) + + db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.snapshot_destroy(self.context, snap['id']) + db.volume_destroy(self.context, vol['id']) + + def test_describe_availability_zones(self): + """Makes sure describe_availability_zones works and filters results.""" + service1 = db.service_create(self.context, {'host': 'host1_zones', + 'binary': "nova-compute", + 'topic': 'compute', + 'report_count': 0, + 'availability_zone': "zone1"}) + service2 = db.service_create(self.context, {'host': 'host2_zones', + 'binary': "nova-compute", + 'topic': 'compute', + 'report_count': 0, + 
'availability_zone': "zone2"}) + result = self.cloud.describe_availability_zones(self.context) + self.assertEqual(len(result['availabilityZoneInfo']), 3) + db.service_destroy(self.context, service1['id']) + db.service_destroy(self.context, service2['id']) + + def test_describe_snapshots(self): + """Makes sure describe_snapshots works and filters results.""" + vol = db.volume_create(self.context, {}) + snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 2) + snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id']) + result = self.cloud.describe_snapshots(self.context, + snapshot_id=[snapshot_id]) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual( + ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']), + snap2['id']) + db.snapshot_destroy(self.context, snap1['id']) + db.snapshot_destroy(self.context, snap2['id']) + db.volume_destroy(self.context, vol['id']) + + def test_create_snapshot(self): + """Makes sure create_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) + + result = self.cloud.create_snapshot(self.context, + volume_id=volume_id) + snapshot_id = result['snapshotId'] + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id) + + db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id)) + db.volume_destroy(self.context, vol['id']) + + def test_delete_snapshot(self): + """Makes sure delete_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id']) + + result = self.cloud.delete_snapshot(self.context, + snapshot_id=snapshot_id) + self.assertTrue(result) + + db.volume_destroy(self.context, vol['id']) + + def test_describe_instances(self): + """Makes sure describe_instances works and filters results.""" + inst1 = db.instance_create(self.context, {'reservation_id': 'a', + 'image_ref': 1, + 'host': 'host1'}) + inst2 = db.instance_create(self.context, {'reservation_id': 'a', + 'image_ref': 1, + 'host': 'host2'}) + comp1 = db.service_create(self.context, {'host': 'host1', + 'availability_zone': 'zone1', + 'topic': "compute"}) + comp2 = db.service_create(self.context, {'host': 'host2', + 'availability_zone': 'zone2', + 'topic': "compute"}) + result = self.cloud.describe_instances(self.context) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 2) + instance_id = ec2utils.id_to_ec2_id(inst2['id']) + result = self.cloud.describe_instances(self.context, + instance_id=[instance_id]) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 1) + self.assertEqual(result['instancesSet'][0]['instanceId'], + instance_id) + self.assertEqual(result['instancesSet'][0] + ['placement']['availabilityZone'], 'zone2') + db.instance_destroy(self.context, inst1['id']) + db.instance_destroy(self.context, inst2['id']) + db.service_destroy(self.context, comp1['id']) + db.service_destroy(self.context, comp2['id']) + + def test_describe_instances_deleted(self): + args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} + inst1 = 
db.instance_create(self.context, args1) + args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'} + inst2 = db.instance_create(self.context, args2) + db.instance_destroy(self.context, inst1.id) + result = self.cloud.describe_instances(self.context) + self.assertEqual(len(result['reservationSet']), 1) + result1 = result['reservationSet'][0]['instancesSet'] + self.assertEqual(result1[0]['instanceId'], + ec2utils.id_to_ec2_id(inst2.id)) + + def _block_device_mapping_create(self, instance_id, mappings): + volumes = [] + for bdm in mappings: + db.block_device_mapping_create(self.context, bdm) + if 'volume_id' in bdm: + values = {'id': bdm['volume_id']} + for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'), + ('snapshot_size', 'volume_size'), + ('delete_on_termination', + 'delete_on_termination')]: + if bdm_key in bdm: + values[vol_key] = bdm[bdm_key] + vol = db.volume_create(self.context, values) + db.volume_attached(self.context, vol['id'], + instance_id, bdm['device_name']) + volumes.append(vol) + return volumes + + def _setUpBlockDeviceMapping(self): + inst1 = db.instance_create(self.context, + {'image_ref': 1, + 'root_device_name': '/dev/sdb1'}) + inst2 = db.instance_create(self.context, + {'image_ref': 2, + 'root_device_name': '/dev/sdc1'}) + + instance_id = inst1['id'] + mappings0 = [ + {'instance_id': instance_id, + 'device_name': '/dev/sdb1', + 'snapshot_id': '1', + 'volume_id': '2'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb2', + 'volume_id': '3', + 'volume_size': 1}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb3', + 'delete_on_termination': True, + 'snapshot_id': '4', + 'volume_id': '5'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb4', + 'delete_on_termination': False, + 'snapshot_id': '6', + 'volume_id': '7'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb5', + 'snapshot_id': '8', + 'volume_id': '9', + 'volume_size': 0}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb6', + 'snapshot_id': '10', + 'volume_id': '11', + 'volume_size': 1}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb7', + 'no_device': True}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb8', + 'virtual_name': 'swap'}, + {'instance_id': instance_id, + 'device_name': '/dev/sdb9', + 'virtual_name': 'ephemeral3'}] + + volumes = self._block_device_mapping_create(instance_id, mappings0) + return (inst1, inst2, volumes) + + def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes): + for vol in volumes: + db.volume_destroy(self.context, vol['id']) + for id in (inst1['id'], inst2['id']): + for bdm in db.block_device_mapping_get_all_by_instance( + self.context, id): + db.block_device_mapping_destroy(self.context, bdm['id']) + db.instance_destroy(self.context, inst2['id']) + db.instance_destroy(self.context, inst1['id']) + + _expected_instance_bdm1 = { + 'instanceId': 'i-00000001', + 'rootDeviceName': '/dev/sdb1', + 'rootDeviceType': 'ebs'} + + _expected_block_device_mapping0 = [ + {'deviceName': '/dev/sdb1', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 2, + }}, + {'deviceName': '/dev/sdb2', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 3, + }}, + {'deviceName': '/dev/sdb3', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': True, + 'volumeId': 5, + }}, + {'deviceName': '/dev/sdb4', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 7, + }}, + {'deviceName': '/dev/sdb5', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 
'volumeId': 9, + }}, + {'deviceName': '/dev/sdb6', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': False, + 'volumeId': 11, }}] + # NOTE(yamahata): swap/ephemeral device case isn't supported yet. + + _expected_instance_bdm2 = { + 'instanceId': 'i-00000002', + 'rootDeviceName': '/dev/sdc1', + 'rootDeviceType': 'instance-store'} + + def test_format_instance_bdm(self): + (inst1, inst2, volumes) = self._setUpBlockDeviceMapping() + + result = {} + self.cloud._format_instance_bdm(self.context, inst1['id'], '/dev/sdb1', + result) + self.assertSubDictMatch( + {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']}, + result) + self._assertEqualBlockDeviceMapping( + self._expected_block_device_mapping0, result['blockDeviceMapping']) + + result = {} + self.cloud._format_instance_bdm(self.context, inst2['id'], '/dev/sdc1', + result) + self.assertSubDictMatch( + {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']}, + result) + + self._tearDownBlockDeviceMapping(inst1, inst2, volumes) + + def _assertInstance(self, instance_id): + ec2_instance_id = ec2utils.id_to_ec2_id(instance_id) + result = self.cloud.describe_instances(self.context, + instance_id=[ec2_instance_id]) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 1) + result = result['instancesSet'][0] + self.assertEqual(result['instanceId'], ec2_instance_id) + return result + + def _assertEqualBlockDeviceMapping(self, expected, result): + self.assertEqual(len(expected), len(result)) + for x in expected: + found = False + for y in result: + if x['deviceName'] == y['deviceName']: + self.assertSubDictMatch(x, y) + found = True + break + self.assertTrue(found) + + def test_describe_instances_bdm(self): + """Make sure describe_instances works with root_device_name and + block device mappings + """ + (inst1, inst2, volumes) = self._setUpBlockDeviceMapping() + + result = self._assertInstance(inst1['id']) + self.assertSubDictMatch(self._expected_instance_bdm1, result) + self._assertEqualBlockDeviceMapping( + self._expected_block_device_mapping0, result['blockDeviceMapping']) + + result = self._assertInstance(inst2['id']) + self.assertSubDictMatch(self._expected_instance_bdm2, result) + + self._tearDownBlockDeviceMapping(inst1, inst2, volumes) + + def test_describe_images(self): + describe_images = self.cloud.describe_images + + def fake_detail(meh, context): + return [{'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}}] + + def fake_show_none(meh, context, id): + raise exception.ImageNotFound(image_id='bad_image_id') + + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) + # list all + result1 = describe_images(self.context) + result1 = result1['imagesSet'][0] + self.assertEqual(result1['imageId'], 'ami-00000001') + # provided a valid image_id + result2 = describe_images(self.context, ['ami-00000001']) + self.assertEqual(1, len(result2['imagesSet'])) + # provide more than 1 valid image_id + result3 = describe_images(self.context, ['ami-00000001', + 'ami-00000002']) + self.assertEqual(2, len(result3['imagesSet'])) + # provide an non-existing image_id + self.stubs.UnsetAll() + self.stubs.Set(fake._FakeImageService, 'show', fake_show_none) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none) + self.assertRaises(exception.ImageNotFound, describe_images, + self.context, ['ami-fake']) + + def assertDictListUnorderedMatch(self, L1, L2, key): + self.assertEqual(len(L1), len(L2)) + for d1 in L1: + 
self.assertTrue(key in d1) + for d2 in L2: + self.assertTrue(key in d2) + if d1[key] == d2[key]: + self.assertDictMatch(d1, d2) + + def _setUpImageSet(self, create_volumes_and_snapshots=False): + mappings1 = [ + {'device': '/dev/sda1', 'virtual': 'root'}, + + {'device': 'sdb0', 'virtual': 'ephemeral0'}, + {'device': 'sdb1', 'virtual': 'ephemeral1'}, + {'device': 'sdb2', 'virtual': 'ephemeral2'}, + {'device': 'sdb3', 'virtual': 'ephemeral3'}, + {'device': 'sdb4', 'virtual': 'ephemeral4'}, + + {'device': 'sdc0', 'virtual': 'swap'}, + {'device': 'sdc1', 'virtual': 'swap'}, + {'device': 'sdc2', 'virtual': 'swap'}, + {'device': 'sdc3', 'virtual': 'swap'}, + {'device': 'sdc4', 'virtual': 'swap'}] + block_device_mapping1 = [ + {'device_name': '/dev/sdb1', 'snapshot_id': 01234567}, + {'device_name': '/dev/sdb2', 'volume_id': 01234567}, + {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'}, + {'device_name': '/dev/sdb4', 'no_device': True}, + + {'device_name': '/dev/sdc1', 'snapshot_id': 12345678}, + {'device_name': '/dev/sdc2', 'volume_id': 12345678}, + {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'}, + {'device_name': '/dev/sdc4', 'no_device': True}] + image1 = { + 'id': 1, + 'properties': { + 'kernel_id': 1, + 'type': 'machine', + 'image_state': 'available', + 'mappings': mappings1, + 'block_device_mapping': block_device_mapping1, + } + } + + mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}] + block_device_mapping2 = [{'device_name': '/dev/sdb1', + 'snapshot_id': 01234567}] + image2 = { + 'id': 2, + 'properties': { + 'kernel_id': 2, + 'type': 'machine', + 'root_device_name': '/dev/sdb1', + 'mappings': mappings2, + 'block_device_mapping': block_device_mapping2}} + + def fake_show(meh, context, image_id): + for i in [image1, image2]: + if i['id'] == image_id: + return i + raise exception.ImageNotFound(image_id=image_id) + + def fake_detail(meh, context): + return [image1, image2] + + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) + + volumes = [] + snapshots = [] + if create_volumes_and_snapshots: + for bdm in block_device_mapping1: + if 'volume_id' in bdm: + vol = self._volume_create(bdm['volume_id']) + volumes.append(vol['id']) + if 'snapshot_id' in bdm: + snap = db.snapshot_create(self.context, + {'id': bdm['snapshot_id'], + 'volume_id': 76543210, + 'status': "available", + 'volume_size': 1}) + snapshots.append(snap['id']) + return (volumes, snapshots) + + def _assertImageSet(self, result, root_device_type, root_device_name): + self.assertEqual(1, len(result['imagesSet'])) + result = result['imagesSet'][0] + self.assertTrue('rootDeviceType' in result) + self.assertEqual(result['rootDeviceType'], root_device_type) + self.assertTrue('rootDeviceName' in result) + self.assertEqual(result['rootDeviceName'], root_device_name) + self.assertTrue('blockDeviceMapping' in result) + + return result + + _expected_root_device_name1 = '/dev/sda1' + # NOTE(yamahata): noDevice doesn't make sense when returning mapping + # It makes sense only when user overriding existing + # mapping. 
+ _expected_bdms1 = [ + {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'}, + {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId': + 'snap-00053977'}}, + {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId': + 'vol-00053977'}}, + {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'}, + # {'deviceName': '/dev/sdb4', 'noDevice': True}, + + {'deviceName': '/dev/sdc0', 'virtualName': 'swap'}, + {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId': + 'snap-00bc614e'}}, + {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId': + 'vol-00bc614e'}}, + {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'}, + # {'deviceName': '/dev/sdc4', 'noDevice': True} + ] + + _expected_root_device_name2 = '/dev/sdb1' + _expected_bdms2 = [{'deviceName': '/dev/sdb1', + 'ebs': {'snapshotId': 'snap-00053977'}}] + + # NOTE(yamahata): + # InstanceBlockDeviceMappingItemType + # rootDeviceType + # rootDeviceName + # blockDeviceMapping + # deviceName + # virtualName + # ebs + # snapshotId + # volumeSize + # deleteOnTermination + # noDevice + def test_describe_image_mapping(self): + """test for rootDeviceName and blockDeiceMapping""" + describe_images = self.cloud.describe_images + self._setUpImageSet() + + result = describe_images(self.context, ['ami-00000001']) + result = self._assertImageSet(result, 'instance-store', + self._expected_root_device_name1) + + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + self._expected_bdms1, 'deviceName') + + result = describe_images(self.context, ['ami-00000002']) + result = self._assertImageSet(result, 'ebs', + self._expected_root_device_name2) + + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + self._expected_bdms2, 'deviceName') + + self.stubs.UnsetAll() + + def test_describe_image_attribute(self): + describe_image_attribute = self.cloud.describe_image_attribute + + def fake_show(meh, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'container_format': 'ami', + 'is_public': True} + + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) + result = describe_image_attribute(self.context, 'ami-00000001', + 'launchPermission') + self.assertEqual([{'group': 'all'}], result['launchPermission']) + + def test_describe_image_attribute_root_device_name(self): + describe_image_attribute = self.cloud.describe_image_attribute + self._setUpImageSet() + + result = describe_image_attribute(self.context, 'ami-00000001', + 'rootDeviceName') + self.assertEqual(result['rootDeviceName'], + self._expected_root_device_name1) + result = describe_image_attribute(self.context, 'ami-00000002', + 'rootDeviceName') + self.assertEqual(result['rootDeviceName'], + self._expected_root_device_name2) + + def test_describe_image_attribute_block_device_mapping(self): + describe_image_attribute = self.cloud.describe_image_attribute + self._setUpImageSet() + + result = describe_image_attribute(self.context, 'ami-00000001', + 'blockDeviceMapping') + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + self._expected_bdms1, 'deviceName') + result = describe_image_attribute(self.context, 'ami-00000002', + 'blockDeviceMapping') + self.assertDictListUnorderedMatch(result['blockDeviceMapping'], + self._expected_bdms2, 'deviceName') + + def test_modify_image_attribute(self): + modify_image_attribute = self.cloud.modify_image_attribute + + fake_metadata = {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 
'machine'}, 'is_public': False} + + def fake_show(meh, context, id): + return fake_metadata + + def fake_update(meh, context, image_id, metadata, data=None): + fake_metadata.update(metadata) + return fake_metadata + + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) + self.stubs.Set(fake._FakeImageService, 'update', fake_update) + result = modify_image_attribute(self.context, 'ami-00000001', + 'launchPermission', 'add', + user_group=['all']) + self.assertEqual(True, result['is_public']) + + def test_deregister_image(self): + deregister_image = self.cloud.deregister_image + + def fake_delete(self, context, id): + return None + + self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) + # valid image + result = deregister_image(self.context, 'ami-00000001') + self.assertEqual(result['imageId'], 'ami-00000001') + # invalid image + self.stubs.UnsetAll() + + def fake_detail_empty(self, context): + return [] + + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty) + self.assertRaises(exception.ImageNotFound, deregister_image, + self.context, 'ami-bad001') + + def test_deregister_image_wrong_container_type(self): + deregister_image = self.cloud.deregister_image + + def fake_delete(self, context, id): + return None + + self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) + self.assertRaises(exception.NotFound, deregister_image, self.context, + 'aki-00000001') + + def _run_instance(self, **kwargs): + rv = self.cloud.run_instances(self.context, **kwargs) + instance_id = rv['instancesSet'][0]['instanceId'] + return instance_id + + def _run_instance_wait(self, **kwargs): + ec2_instance_id = self._run_instance(**kwargs) + self._wait_for_running(ec2_instance_id) + return ec2_instance_id + + def test_console_output(self): + instance_id = self._run_instance( + image_id='ami-1', + instance_type=FLAGS.default_instance_type, + max_count=1) + output = self.cloud.get_console_output(context=self.context, + instance_id=[instance_id]) + self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') + # TODO(soren): We need this until we can stop polling in the rpc code + # for unit tests. + rv = self.cloud.terminate_instances(self.context, [instance_id]) + + def test_ajax_console(self): + instance_id = self._run_instance(image_id='ami-1') + output = self.cloud.get_ajax_console(context=self.context, + instance_id=[instance_id]) + self.assertEquals(output['url'], + '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) + # TODO(soren): We need this until we can stop polling in the rpc code + # for unit tests. 
+ rv = self.cloud.terminate_instances(self.context, [instance_id]) + + def test_key_generation(self): + result = self._create_key('test') + private_key = result['private_key'] + key = RSA.load_key_string(private_key, callback=lambda: None) + bio = BIO.MemoryBuffer() + public_key = db.key_pair_get(self.context, + self.context.user_id, + 'test')['public_key'] + key.save_pub_key_bio(bio) + converted = crypto.ssl_pub_to_ssh_pub(bio.read()) + # assert key fields are equal + self.assertEqual(public_key.split(" ")[1].strip(), + converted.split(" ")[1].strip()) + + def test_describe_key_pairs(self): + self._create_key('test1') + self._create_key('test2') + result = self.cloud.describe_key_pairs(self.context) + keys = result["keySet"] + self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) + self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) + + def test_import_public_key(self): + # test when user provides all values + result1 = self.cloud.import_public_key(self.context, + 'testimportkey1', + 'mytestpubkey', + 'mytestfprint') + self.assertTrue(result1) + keydata = db.key_pair_get(self.context, + self.context.user_id, + 'testimportkey1') + self.assertEqual('mytestpubkey', keydata['public_key']) + self.assertEqual('mytestfprint', keydata['fingerprint']) + # test when user omits fingerprint + pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key') + f = open(pubkey_path + '/dummy.pub', 'r') + dummypub = f.readline().rstrip() + f.close + f = open(pubkey_path + '/dummy.fingerprint', 'r') + dummyfprint = f.readline().rstrip() + f.close + result2 = self.cloud.import_public_key(self.context, + 'testimportkey2', + dummypub) + self.assertTrue(result2) + keydata = db.key_pair_get(self.context, + self.context.user_id, + 'testimportkey2') + self.assertEqual(dummypub, keydata['public_key']) + self.assertEqual(dummyfprint, keydata['fingerprint']) + + def test_delete_key_pair(self): + self._create_key('test') + self.cloud.delete_key_pair(self.context, 'test') + + def test_run_instances(self): + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + run_instances = self.cloud.run_instances + result = run_instances(self.context, **kwargs) + instance = result['instancesSet'][0] + self.assertEqual(instance['imageId'], 'ami-00000001') + self.assertEqual(instance['displayName'], 'Server 1') + self.assertEqual(instance['instanceId'], 'i-00000001') + self.assertEqual(instance['instanceState']['name'], 'running') + self.assertEqual(instance['instanceType'], 'm1.small') + + def test_run_instances_image_state_none(self): + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + run_instances = self.cloud.run_instances + + def fake_show_no_state(self, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'container_format': 'ami'} + + self.stubs.UnsetAll() + self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state) + self.assertRaises(exception.ApiError, run_instances, + self.context, **kwargs) + + def test_run_instances_image_state_invalid(self): + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + run_instances = self.cloud.run_instances + + def fake_show_decrypt(self, context, id): + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine', 'image_state': 'decrypting'}} + + self.stubs.UnsetAll() + 
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt) + self.assertRaises(exception.ApiError, run_instances, + self.context, **kwargs) + + def test_run_instances_image_status_active(self): + kwargs = {'image_id': FLAGS.default_image, + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + run_instances = self.cloud.run_instances + + def fake_show_stat_active(self, context, id): + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'status': 'active'} + + self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active) + + result = run_instances(self.context, **kwargs) + self.assertEqual(len(result['instancesSet']), 1) + + def test_terminate_instances(self): + inst1 = db.instance_create(self.context, {'reservation_id': 'a', + 'image_ref': 1, + 'host': 'host1'}) + terminate_instances = self.cloud.terminate_instances + # valid instance_id + result = terminate_instances(self.context, ['i-00000001']) + self.assertTrue(result) + # non-existing instance_id + self.assertRaises(exception.InstanceNotFound, terminate_instances, + self.context, ['i-2']) + db.instance_destroy(self.context, inst1['id']) + + def test_update_of_instance_display_fields(self): + inst = db.instance_create(self.context, {}) + ec2_id = ec2utils.id_to_ec2_id(inst['id']) + self.cloud.update_instance(self.context, ec2_id, + display_name='c00l 1m4g3') + inst = db.instance_get(self.context, inst['id']) + self.assertEqual('c00l 1m4g3', inst['display_name']) + db.instance_destroy(self.context, inst['id']) + + def test_update_of_instance_wont_update_private_fields(self): + inst = db.instance_create(self.context, {}) + host = inst['host'] + ec2_id = ec2utils.id_to_ec2_id(inst['id']) + self.cloud.update_instance(self.context, ec2_id, + display_name='c00l 1m4g3', + host='otherhost') + inst = db.instance_get(self.context, inst['id']) + self.assertEqual(host, inst['host']) + db.instance_destroy(self.context, inst['id']) + + def test_update_of_volume_display_fields(self): + vol = db.volume_create(self.context, {}) + self.cloud.update_volume(self.context, + ec2utils.id_to_ec2_vol_id(vol['id']), + display_name='c00l v0lum3') + vol = db.volume_get(self.context, vol['id']) + self.assertEqual('c00l v0lum3', vol['display_name']) + db.volume_destroy(self.context, vol['id']) + + def test_update_of_volume_wont_update_private_fields(self): + vol = db.volume_create(self.context, {}) + self.cloud.update_volume(self.context, + ec2utils.id_to_ec2_vol_id(vol['id']), + mountpoint='/not/here') + vol = db.volume_get(self.context, vol['id']) + self.assertEqual(None, vol['mountpoint']) + db.volume_destroy(self.context, vol['id']) + + def _restart_compute_service(self, periodic_interval=None): + """restart compute service. 
NOTE: fake driver forgets all instances.""" + self.compute.kill() + if periodic_interval: + self.compute = self.start_service( + 'compute', periodic_interval=periodic_interval) + else: + self.compute = self.start_service('compute') + + def _wait_for_state(self, ctxt, instance_id, predicate): + """Wait for a stopped instance to be a given state""" + id = ec2utils.ec2_id_to_id(instance_id) + while True: + info = self.cloud.compute_api.get(context=ctxt, instance_id=id) + LOG.debug(info) + if predicate(info): + break + greenthread.sleep(1) + + def _wait_for_running(self, instance_id): + def is_running(info): + vm_state = info["vm_state"] + task_state = info["task_state"] + return vm_state == vm_states.ACTIVE and task_state == None + self._wait_for_state(self.context, instance_id, is_running) + + def _wait_for_stopped(self, instance_id): + def is_stopped(info): + vm_state = info["vm_state"] + task_state = info["task_state"] + return vm_state == vm_states.STOPPED and task_state == None + self._wait_for_state(self.context, instance_id, is_stopped) + + def _wait_for_terminate(self, instance_id): + def is_deleted(info): + return info['deleted'] + elevated = self.context.elevated(read_deleted=True) + self._wait_for_state(elevated, instance_id, is_deleted) + + def test_stop_start_instance(self): + """Makes sure stop/start instance works""" + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, } + instance_id = self._run_instance_wait(**kwargs) + + # a running instance can't be started. It is just ignored. + result = self.cloud.start_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + + result = self.cloud.stop_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_stopped(instance_id) + + result = self.cloud.start_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_running(instance_id) + + result = self.cloud.stop_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_stopped(instance_id) + + result = self.cloud.terminate_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + + self._restart_compute_service() + + def _volume_create(self, volume_id=None): + kwargs = {'status': 'available', + 'host': self.volume.host, + 'size': 1, + 'attach_status': 'detached', } + if volume_id: + kwargs['id'] = volume_id + return db.volume_create(self.context, kwargs) + + def _assert_volume_attached(self, vol, instance_id, mountpoint): + self.assertEqual(vol['instance_id'], instance_id) + self.assertEqual(vol['mountpoint'], mountpoint) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + + def _assert_volume_detached(self, vol): + self.assertEqual(vol['instance_id'], None) + self.assertEqual(vol['mountpoint'], None) + self.assertEqual(vol['status'], "available") + self.assertEqual(vol['attach_status'], "detached") + + def test_stop_start_with_volume(self): + """Make sure run instance with block device mapping works""" + + # enforce periodic tasks run in short time to avoid wait for 60s. 
+ self._restart_compute_service(periodic_interval=0.3) + + vol1 = self._volume_create() + vol2 = self._volume_create() + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'volume_id': vol1['id'], + 'delete_on_termination': False}, + {'device_name': '/dev/vdc', + 'volume_id': vol2['id'], + 'delete_on_termination': True}, + ]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + for vol in vols: + self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdb') + + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + result = self.cloud.stop_instances(self.context, [ec2_instance_id]) + self.assertTrue(result) + self._wait_for_stopped(ec2_instance_id) + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_detached(vol) + + self.cloud.start_instances(self.context, [ec2_instance_id]) + self._wait_for_running(ec2_instance_id) + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + for vol in vols: + self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) + self.assertTrue(vol['mountpoint'] == '/dev/vdb' or + vol['mountpoint'] == '/dev/vdc') + self.assertEqual(vol['instance_id'], instance_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + + admin_ctxt = context.get_admin_context(read_deleted=False) + vol = db.volume_get(admin_ctxt, vol1['id']) + self.assertFalse(vol['deleted']) + db.volume_destroy(self.context, vol1['id']) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=True) + vol = db.volume_get(admin_ctxt, vol2['id']) + self.assertTrue(vol['deleted']) + + self._restart_compute_service() + + def test_stop_with_attached_volume(self): + """Make sure attach info is reflected to block device mapping""" + # enforce periodic tasks run in short time to avoid wait for 60s. 
+ self._restart_compute_service(periodic_interval=0.3) + + vol1 = self._volume_create() + vol2 = self._volume_create() + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'volume_id': vol1['id'], + 'delete_on_termination': True}]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 1) + for vol in vols: + self.assertEqual(vol['id'], vol1['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdb') + + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_detached(vol) + + self.cloud.compute_api.attach_volume(self.context, + instance_id=instance_id, + volume_id=vol2['id'], + device='/dev/vdc') + greenthread.sleep(0.3) + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + self.cloud.compute_api.detach_volume(self.context, + volume_id=vol1['id']) + greenthread.sleep(0.3) + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + + result = self.cloud.stop_instances(self.context, [ec2_instance_id]) + self.assertTrue(result) + self._wait_for_stopped(ec2_instance_id) + + for vol_id in (vol1['id'], vol2['id']): + vol = db.volume_get(self.context, vol_id) + self._assert_volume_detached(vol) + + self.cloud.start_instances(self.context, [ec2_instance_id]) + self._wait_for_running(ec2_instance_id) + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 1) + for vol in vols: + self.assertEqual(vol['id'], vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + + for vol_id in (vol1['id'], vol2['id']): + vol = db.volume_get(self.context, vol_id) + self.assertEqual(vol['id'], vol_id) + self._assert_volume_detached(vol) + db.volume_destroy(self.context, vol_id) + + self._restart_compute_service() + + def _create_snapshot(self, ec2_volume_id): + result = self.cloud.create_snapshot(self.context, + volume_id=ec2_volume_id) + greenthread.sleep(0.3) + return result['snapshotId'] + + def test_run_with_snapshot(self): + """Makes sure run/stop/start instance with snapshot works.""" + vol = self._volume_create() + ec2_volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) + + ec2_snapshot1_id = self._create_snapshot(ec2_volume_id) + snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id) + ec2_snapshot2_id = self._create_snapshot(ec2_volume_id) + snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'snapshot_id': snapshot1_id, + 'delete_on_termination': False, }, + {'device_name': '/dev/vdc', + 'snapshot_id': snapshot2_id, + 'delete_on_termination': True}]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + vol1_id = None + vol2_id = None + for vol in vols: + snapshot_id = vol['snapshot_id'] + if snapshot_id == snapshot1_id: + vol1_id = vol['id'] + mountpoint = '/dev/vdb' + elif snapshot_id == snapshot2_id: + 
vol2_id = vol['id'] + mountpoint = '/dev/vdc' + else: + self.fail() + + self._assert_volume_attached(vol, instance_id, mountpoint) + + self.assertTrue(vol1_id) + self.assertTrue(vol2_id) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + self._wait_for_terminate(ec2_instance_id) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=False) + vol = db.volume_get(admin_ctxt, vol1_id) + self._assert_volume_detached(vol) + self.assertFalse(vol['deleted']) + db.volume_destroy(self.context, vol1_id) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=True) + vol = db.volume_get(admin_ctxt, vol2_id) + self.assertTrue(vol['deleted']) + + for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id): + self.cloud.delete_snapshot(self.context, snapshot_id) + greenthread.sleep(0.3) + db.volume_destroy(self.context, vol['id']) + + def test_create_image(self): + """Make sure that CreateImage works""" + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + (volumes, snapshots) = self._setUpImageSet( + create_volumes_and_snapshots=True) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1} + ec2_instance_id = self._run_instance_wait(**kwargs) + + # TODO(yamahata): s3._s3_create() can't be tested easily by unit test + # as there is no unit test for s3.create() + ## result = self.cloud.create_image(self.context, ec2_instance_id, + ## no_reboot=True) + ## ec2_image_id = result['imageId'] + ## created_image = self.cloud.describe_images(self.context, + ## [ec2_image_id]) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + for vol in volumes: + db.volume_destroy(self.context, vol) + for snap in snapshots: + db.snapshot_destroy(self.context, snap) + # TODO(yamahata): clean up snapshot created by CreateImage. 
+ + self._restart_compute_service() + + @staticmethod + def _fake_bdm_get(ctxt, id): + return [{'volume_id': 87654321, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': None, + 'delete_on_termination': True, + 'device_name': '/dev/sdh'}, + {'volume_id': None, + 'snapshot_id': 98765432, + 'no_device': None, + 'virtual_name': None, + 'delete_on_termination': True, + 'device_name': '/dev/sdi'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': True, + 'virtual_name': None, + 'delete_on_termination': None, + 'device_name': None}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral0', + 'delete_on_termination': None, + 'device_name': '/dev/sdb'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'swap', + 'delete_on_termination': None, + 'device_name': '/dev/sdc'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral1', + 'delete_on_termination': None, + 'device_name': '/dev/sdd'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral2', + 'delete_on_termination': None, + 'device_name': '/dev/sd3'}, + ] + + def test_get_instance_mapping(self): + """Make sure that _get_instance_mapping works""" + ctxt = None + instance_ref0 = {'id': 0, + 'root_device_name': None} + instance_ref1 = {'id': 0, + 'root_device_name': '/dev/sda1'} + + self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', + self._fake_bdm_get) + + expected = {'ami': 'sda1', + 'root': '/dev/sda1', + 'ephemeral0': '/dev/sdb', + 'swap': '/dev/sdc', + 'ephemeral1': '/dev/sdd', + 'ephemeral2': '/dev/sd3'} + + self.assertEqual(self.cloud._format_instance_mapping(ctxt, + instance_ref0), + cloud._DEFAULT_MAPPINGS) + self.assertEqual(self.cloud._format_instance_mapping(ctxt, + instance_ref1), + expected) + + def test_describe_instance_attribute(self): + """Make sure that describe_instance_attribute works""" + self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', + self._fake_bdm_get) + + def fake_get(ctxt, instance_id): + return { + 'id': 0, + 'root_device_name': '/dev/sdh', + 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], + 'vm_state': vm_states.STOPPED, + 'instance_type': {'name': 'fake_type'}, + 'kernel_id': 1, + 'ramdisk_id': 2, + 'user_data': 'fake-user data', + } + self.stubs.Set(self.cloud.compute_api, 'get', fake_get) + + def fake_volume_get(ctxt, volume_id, session=None): + if volume_id == 87654321: + return {'id': volume_id, + 'attach_time': '13:56:24', + 'status': 'in-use'} + raise exception.VolumeNotFound(volume_id=volume_id) + self.stubs.Set(db.api, 'volume_get', fake_volume_get) + + get_attribute = functools.partial( + self.cloud.describe_instance_attribute, + self.context, 'i-12345678') + + bdm = get_attribute('blockDeviceMapping') + bdm['blockDeviceMapping'].sort() + + expected_bdm = {'instance_id': 'i-12345678', + 'rootDeviceType': 'ebs', + 'blockDeviceMapping': [ + {'deviceName': '/dev/sdh', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': True, + 'volumeId': 87654321, + 'attachTime': '13:56:24'}}]} + expected_bdm['blockDeviceMapping'].sort() + self.assertEqual(bdm, expected_bdm) + # NOTE(yamahata): this isn't supported + # get_attribute('disableApiTermination') + groupSet = get_attribute('groupSet') + groupSet['groupSet'].sort() + expected_groupSet = {'instance_id': 'i-12345678', + 'groupSet': [{'groupId': 'fake0'}, + {'groupId': 'fake1'}]} + expected_groupSet['groupSet'].sort() + self.assertEqual(groupSet, 
expected_groupSet) + self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), + {'instance_id': 'i-12345678', + 'instanceInitiatedShutdownBehavior': 'stopped'}) + self.assertEqual(get_attribute('instanceType'), + {'instance_id': 'i-12345678', + 'instanceType': 'fake_type'}) + self.assertEqual(get_attribute('kernel'), + {'instance_id': 'i-12345678', + 'kernel': 'aki-00000001'}) + self.assertEqual(get_attribute('ramdisk'), + {'instance_id': 'i-12345678', + 'ramdisk': 'ari-00000002'}) + self.assertEqual(get_attribute('rootDeviceName'), + {'instance_id': 'i-12345678', + 'rootDeviceName': '/dev/sdh'}) + # NOTE(yamahata): this isn't supported + # get_attribute('sourceDestCheck') + self.assertEqual(get_attribute('userData'), + {'instance_id': 'i-12345678', + 'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'}) diff --git a/nova/tests/public_key/dummy.fingerprint b/nova/tests/public_key/dummy.fingerprint deleted file mode 100644 index 715bca27a..000000000 --- a/nova/tests/public_key/dummy.fingerprint +++ /dev/null @@ -1 +0,0 @@ -1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df diff --git a/nova/tests/public_key/dummy.pub b/nova/tests/public_key/dummy.pub deleted file mode 100644 index d4cf2bc0d..000000000 --- a/nova/tests/public_key/dummy.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py deleted file mode 100644 index 7fe353b3d..000000000 --- a/nova/tests/test_cloud.py +++ /dev/null @@ -1,1623 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mox - -import functools - -from base64 import b64decode -from M2Crypto import BIO -from M2Crypto import RSA -import os - -from eventlet import greenthread - -from nova import context -from nova import crypto -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import network -from nova import rpc -from nova import test -from nova import utils -from nova.api.ec2 import cloud -from nova.api.ec2 import ec2utils -from nova.compute import vm_states -from nova.image import fake - - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.cloud') - - -class CloudTestCase(test.TestCase): - def setUp(self): - super(CloudTestCase, self).setUp() - self.flags(connection_type='fake', - stub_network=True) - - # set up our cloud - self.cloud = cloud.CloudController() - - # set up services - self.compute = self.start_service('compute') - self.scheduter = self.start_service('scheduler') - self.network = self.start_service('network') - self.volume = self.start_service('volume') - self.image_service = utils.import_object(FLAGS.image_service) - - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, - self.project_id, - True) - - def fake_show(meh, context, id): - return {'id': 1, 'container_format': 'ami', - 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine', 'image_state': 'available'}} - - self.stubs.Set(fake._FakeImageService, 'show', fake_show) - self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) - - # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish - rpc_cast = rpc.cast - - def finish_cast(*args, **kwargs): - rpc_cast(*args, **kwargs) - greenthread.sleep(0.2) - - self.stubs.Set(rpc, 'cast', finish_cast) - - def _create_key(self, name): - # NOTE(vish): create depends on pool, so just call helper directly - return cloud._gen_key(self.context, self.context.user_id, name) - - def test_describe_regions(self): - """Makes sure describe regions runs without raising an exception""" - result = self.cloud.describe_regions(self.context) - self.assertEqual(len(result['regionInfo']), 1) - self.flags(region_list=["one=test_host1", "two=test_host2"]) - result = self.cloud.describe_regions(self.context) - self.assertEqual(len(result['regionInfo']), 2) - - def test_describe_addresses(self): - """Makes sure describe addresses runs without raising an exception""" - address = "10.10.10.10" - db.floating_ip_create(self.context, - {'address': address, - 'host': self.network.host}) - self.cloud.allocate_address(self.context) - self.cloud.describe_addresses(self.context) - self.cloud.release_address(self.context, - public_ip=address) - db.floating_ip_destroy(self.context, address) - - def test_allocate_address(self): - address = "10.10.10.10" - allocate = self.cloud.allocate_address - db.floating_ip_create(self.context, - {'address': address, - 'host': self.network.host}) - self.assertEqual(allocate(self.context)['publicIp'], address) - db.floating_ip_destroy(self.context, address) - self.assertRaises(exception.NoMoreFloatingIps, - allocate, - self.context) - - def test_release_address(self): - address = "10.10.10.10" - allocate = self.cloud.allocate_address - db.floating_ip_create(self.context, - {'address': address, - 'host': self.network.host}) - result = self.cloud.release_address(self.context, address) - self.assertEqual(result['releaseResponse'], ['Address released.']) - - def test_release_address_still_associated(self): - address = "10.10.10.10" - 
fixed_ip = {'instance': {'id': 1}} - floating_ip = {'id': 0, - 'address': address, - 'fixed_ip_id': 0, - 'fixed_ip': fixed_ip, - 'project_id': None, - 'auto_assigned': False} - network_api = network.api.API() - self.mox.StubOutWithMock(network_api.db, 'floating_ip_get_by_address') - network_api.db.floating_ip_get_by_address(mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(floating_ip) - self.mox.ReplayAll() - release = self.cloud.release_address - # ApiError: Floating ip is in use. Disassociate it before releasing. - self.assertRaises(exception.ApiError, release, self.context, address) - - def test_associate_disassociate_address(self): - """Verifies associate runs cleanly without raising an exception""" - address = "10.10.10.10" - db.floating_ip_create(self.context, {'address': address}) - self.cloud.allocate_address(self.context) - # TODO(jkoelker) Probably need to query for instance_type_id and - # make sure we get a valid one - inst = db.instance_create(self.context, {'host': self.compute.host, - 'instance_type_id': 1}) - networks = db.network_get_all(self.context) - for network in networks: - db.network_update(self.context, network['id'], - {'host': self.network.host}) - project_id = self.context.project_id - type_id = inst['instance_type_id'] - ips = self.network.allocate_for_instance(self.context, - instance_id=inst['id'], - host=inst['host'], - vpn=None, - instance_type_id=type_id, - project_id=project_id) - # TODO(jkoelker) Make this mas bueno - self.assertTrue(ips) - self.assertTrue('ips' in ips[0][1]) - self.assertTrue(ips[0][1]['ips']) - self.assertTrue('ip' in ips[0][1]['ips'][0]) - - fixed = ips[0][1]['ips'][0]['ip'] - - ec2_id = ec2utils.id_to_ec2_id(inst['id']) - self.cloud.associate_address(self.context, - instance_id=ec2_id, - public_ip=address) - self.cloud.disassociate_address(self.context, - public_ip=address) - self.cloud.release_address(self.context, - public_ip=address) - self.network.deallocate_fixed_ip(self.context, fixed) - db.instance_destroy(self.context, inst['id']) - db.floating_ip_destroy(self.context, address) - - def test_describe_security_groups(self): - """Makes sure describe_security_groups works and filters results.""" - sec = db.security_group_create(self.context, - {'project_id': self.context.project_id, - 'name': 'test'}) - result = self.cloud.describe_security_groups(self.context) - # NOTE(vish): should have the default group as well - self.assertEqual(len(result['securityGroupInfo']), 2) - result = self.cloud.describe_security_groups(self.context, - group_name=[sec['name']]) - self.assertEqual(len(result['securityGroupInfo']), 1) - self.assertEqual( - result['securityGroupInfo'][0]['groupName'], - sec['name']) - db.security_group_destroy(self.context, sec['id']) - - def test_describe_security_groups_by_id(self): - sec = db.security_group_create(self.context, - {'project_id': self.context.project_id, - 'name': 'test'}) - result = self.cloud.describe_security_groups(self.context, - group_id=[sec['id']]) - self.assertEqual(len(result['securityGroupInfo']), 1) - self.assertEqual( - result['securityGroupInfo'][0]['groupName'], - sec['name']) - default = db.security_group_get_by_name(self.context, - self.context.project_id, - 'default') - result = self.cloud.describe_security_groups(self.context, - group_id=[default['id']]) - self.assertEqual(len(result['securityGroupInfo']), 1) - self.assertEqual( - result['securityGroupInfo'][0]['groupName'], - 'default') - db.security_group_destroy(self.context, sec['id']) - - def test_create_delete_security_group(self): 
- descript = 'test description' - create = self.cloud.create_security_group - result = create(self.context, 'testgrp', descript) - group_descript = result['securityGroupSet'][0]['groupDescription'] - self.assertEqual(descript, group_descript) - delete = self.cloud.delete_security_group - self.assertTrue(delete(self.context, 'testgrp')) - - def test_delete_security_group_by_id(self): - sec = db.security_group_create(self.context, - {'project_id': self.context.project_id, - 'name': 'test'}) - delete = self.cloud.delete_security_group - self.assertTrue(delete(self.context, group_id=sec['id'])) - - def test_delete_security_group_with_bad_name(self): - delete = self.cloud.delete_security_group - notfound = exception.SecurityGroupNotFound - self.assertRaises(notfound, delete, self.context, 'badname') - - def test_delete_security_group_with_bad_group_id(self): - delete = self.cloud.delete_security_group - notfound = exception.SecurityGroupNotFound - self.assertRaises(notfound, delete, self.context, group_id=999) - - def test_delete_security_group_no_params(self): - delete = self.cloud.delete_security_group - self.assertRaises(exception.ApiError, delete, self.context) - - def test_authorize_security_group_ingress(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - sec = db.security_group_create(self.context, kwargs) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) - - def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - sec = db.security_group_create(self.context, kwargs) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, - 'ip_ranges': - {'1': {'cidr_ip': u'0.0.0.0/0'}, - '2': {'cidr_ip': u'10.10.10.10/32'}}, - 'ip_protocol': u'tcp'}]} - self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) - - def test_authorize_security_group_fail_missing_source_group(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - sec = db.security_group_create(self.context, kwargs) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, - 'ip_ranges':{'1': {'cidr_ip': u'0.0.0.0/0'}, - '2': {'cidr_ip': u'10.10.10.10/32'}}, - 'groups': {'1': {'user_id': u'someuser', - 'group_name': u'somegroup1'}}, - 'ip_protocol': u'tcp'}]} - self.assertRaises(exception.SecurityGroupNotFound, authz, - self.context, group_name=sec['name'], **kwargs) - - def test_authorize_security_group_ingress_ip_permissions_groups(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - sec = db.security_group_create(self.context, - {'project_id': 'someuser', - 'name': 'somegroup1'}) - sec = db.security_group_create(self.context, - {'project_id': 'someuser', - 'name': 'othergroup2'}) - sec = db.security_group_create(self.context, kwargs) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81, - 'groups': {'1': {'user_id': u'someuser', - 'group_name': u'somegroup1'}, - '2': {'user_id': u'someuser', - 'group_name': u'othergroup2'}}, - 'ip_protocol': u'tcp'}]} - self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) - - def test_revoke_security_group_ingress(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - sec 
= db.security_group_create(self.context, kwargs) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - authz(self.context, group_id=sec['id'], **kwargs) - revoke = self.cloud.revoke_security_group_ingress - self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs)) - - def test_authorize_revoke_security_group_ingress_by_id(self): - sec = db.security_group_create(self.context, - {'project_id': self.context.project_id, - 'name': 'test'}) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - authz(self.context, group_id=sec['id'], **kwargs) - revoke = self.cloud.revoke_security_group_ingress - self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs)) - - def test_authorize_security_group_ingress_missing_protocol_params(self): - sec = db.security_group_create(self.context, - {'project_id': self.context.project_id, - 'name': 'test'}) - authz = self.cloud.authorize_security_group_ingress - self.assertRaises(exception.ApiError, authz, self.context, 'test') - - def test_authorize_security_group_ingress_missing_group_name_or_id(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - authz = self.cloud.authorize_security_group_ingress - self.assertRaises(exception.ApiError, authz, self.context, **kwargs) - - def test_authorize_security_group_ingress_already_exists(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - sec = db.security_group_create(self.context, kwargs) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - authz(self.context, group_name=sec['name'], **kwargs) - self.assertRaises(exception.ApiError, authz, self.context, - group_name=sec['name'], **kwargs) - - def test_revoke_security_group_ingress_missing_group_name_or_id(self): - kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - revoke = self.cloud.revoke_security_group_ingress - self.assertRaises(exception.ApiError, revoke, self.context, **kwargs) - - def test_describe_volumes(self): - """Makes sure describe_volumes works and filters results.""" - vol1 = db.volume_create(self.context, {}) - vol2 = db.volume_create(self.context, {}) - result = self.cloud.describe_volumes(self.context) - self.assertEqual(len(result['volumeSet']), 2) - volume_id = ec2utils.id_to_ec2_vol_id(vol2['id']) - result = self.cloud.describe_volumes(self.context, - volume_id=[volume_id]) - self.assertEqual(len(result['volumeSet']), 1) - self.assertEqual( - ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']), - vol2['id']) - db.volume_destroy(self.context, vol1['id']) - db.volume_destroy(self.context, vol2['id']) - - def test_create_volume_from_snapshot(self): - """Makes sure create_volume works when we specify a snapshot.""" - vol = db.volume_create(self.context, {'size': 1}) - snap = db.snapshot_create(self.context, {'volume_id': vol['id'], - 'volume_size': vol['size'], - 'status': "available"}) - snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id']) - - result = self.cloud.create_volume(self.context, - snapshot_id=snapshot_id) - volume_id = result['volumeId'] - result = self.cloud.describe_volumes(self.context) - self.assertEqual(len(result['volumeSet']), 2) - self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) - - db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) - db.snapshot_destroy(self.context, snap['id']) - 
db.volume_destroy(self.context, vol['id']) - - def test_describe_availability_zones(self): - """Makes sure describe_availability_zones works and filters results.""" - service1 = db.service_create(self.context, {'host': 'host1_zones', - 'binary': "nova-compute", - 'topic': 'compute', - 'report_count': 0, - 'availability_zone': "zone1"}) - service2 = db.service_create(self.context, {'host': 'host2_zones', - 'binary': "nova-compute", - 'topic': 'compute', - 'report_count': 0, - 'availability_zone': "zone2"}) - result = self.cloud.describe_availability_zones(self.context) - self.assertEqual(len(result['availabilityZoneInfo']), 3) - db.service_destroy(self.context, service1['id']) - db.service_destroy(self.context, service2['id']) - - def test_describe_snapshots(self): - """Makes sure describe_snapshots works and filters results.""" - vol = db.volume_create(self.context, {}) - snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']}) - snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) - result = self.cloud.describe_snapshots(self.context) - self.assertEqual(len(result['snapshotSet']), 2) - snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id']) - result = self.cloud.describe_snapshots(self.context, - snapshot_id=[snapshot_id]) - self.assertEqual(len(result['snapshotSet']), 1) - self.assertEqual( - ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']), - snap2['id']) - db.snapshot_destroy(self.context, snap1['id']) - db.snapshot_destroy(self.context, snap2['id']) - db.volume_destroy(self.context, vol['id']) - - def test_create_snapshot(self): - """Makes sure create_snapshot works.""" - vol = db.volume_create(self.context, {'status': "available"}) - volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) - - result = self.cloud.create_snapshot(self.context, - volume_id=volume_id) - snapshot_id = result['snapshotId'] - result = self.cloud.describe_snapshots(self.context) - self.assertEqual(len(result['snapshotSet']), 1) - self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id) - - db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id)) - db.volume_destroy(self.context, vol['id']) - - def test_delete_snapshot(self): - """Makes sure delete_snapshot works.""" - vol = db.volume_create(self.context, {'status': "available"}) - snap = db.snapshot_create(self.context, {'volume_id': vol['id'], - 'status': "available"}) - snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id']) - - result = self.cloud.delete_snapshot(self.context, - snapshot_id=snapshot_id) - self.assertTrue(result) - - db.volume_destroy(self.context, vol['id']) - - def test_describe_instances(self): - """Makes sure describe_instances works and filters results.""" - inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_ref': 1, - 'host': 'host1'}) - inst2 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_ref': 1, - 'host': 'host2'}) - comp1 = db.service_create(self.context, {'host': 'host1', - 'availability_zone': 'zone1', - 'topic': "compute"}) - comp2 = db.service_create(self.context, {'host': 'host2', - 'availability_zone': 'zone2', - 'topic': "compute"}) - result = self.cloud.describe_instances(self.context) - result = result['reservationSet'][0] - self.assertEqual(len(result['instancesSet']), 2) - instance_id = ec2utils.id_to_ec2_id(inst2['id']) - result = self.cloud.describe_instances(self.context, - instance_id=[instance_id]) - result = result['reservationSet'][0] - self.assertEqual(len(result['instancesSet']), 1) - 
self.assertEqual(result['instancesSet'][0]['instanceId'], - instance_id) - self.assertEqual(result['instancesSet'][0] - ['placement']['availabilityZone'], 'zone2') - db.instance_destroy(self.context, inst1['id']) - db.instance_destroy(self.context, inst2['id']) - db.service_destroy(self.context, comp1['id']) - db.service_destroy(self.context, comp2['id']) - - def test_describe_instances_deleted(self): - args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'} - inst1 = db.instance_create(self.context, args1) - args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'} - inst2 = db.instance_create(self.context, args2) - db.instance_destroy(self.context, inst1.id) - result = self.cloud.describe_instances(self.context) - self.assertEqual(len(result['reservationSet']), 1) - result1 = result['reservationSet'][0]['instancesSet'] - self.assertEqual(result1[0]['instanceId'], - ec2utils.id_to_ec2_id(inst2.id)) - - def _block_device_mapping_create(self, instance_id, mappings): - volumes = [] - for bdm in mappings: - db.block_device_mapping_create(self.context, bdm) - if 'volume_id' in bdm: - values = {'id': bdm['volume_id']} - for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'), - ('snapshot_size', 'volume_size'), - ('delete_on_termination', - 'delete_on_termination')]: - if bdm_key in bdm: - values[vol_key] = bdm[bdm_key] - vol = db.volume_create(self.context, values) - db.volume_attached(self.context, vol['id'], - instance_id, bdm['device_name']) - volumes.append(vol) - return volumes - - def _setUpBlockDeviceMapping(self): - inst1 = db.instance_create(self.context, - {'image_ref': 1, - 'root_device_name': '/dev/sdb1'}) - inst2 = db.instance_create(self.context, - {'image_ref': 2, - 'root_device_name': '/dev/sdc1'}) - - instance_id = inst1['id'] - mappings0 = [ - {'instance_id': instance_id, - 'device_name': '/dev/sdb1', - 'snapshot_id': '1', - 'volume_id': '2'}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb2', - 'volume_id': '3', - 'volume_size': 1}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb3', - 'delete_on_termination': True, - 'snapshot_id': '4', - 'volume_id': '5'}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb4', - 'delete_on_termination': False, - 'snapshot_id': '6', - 'volume_id': '7'}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb5', - 'snapshot_id': '8', - 'volume_id': '9', - 'volume_size': 0}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb6', - 'snapshot_id': '10', - 'volume_id': '11', - 'volume_size': 1}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb7', - 'no_device': True}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb8', - 'virtual_name': 'swap'}, - {'instance_id': instance_id, - 'device_name': '/dev/sdb9', - 'virtual_name': 'ephemeral3'}] - - volumes = self._block_device_mapping_create(instance_id, mappings0) - return (inst1, inst2, volumes) - - def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes): - for vol in volumes: - db.volume_destroy(self.context, vol['id']) - for id in (inst1['id'], inst2['id']): - for bdm in db.block_device_mapping_get_all_by_instance( - self.context, id): - db.block_device_mapping_destroy(self.context, bdm['id']) - db.instance_destroy(self.context, inst2['id']) - db.instance_destroy(self.context, inst1['id']) - - _expected_instance_bdm1 = { - 'instanceId': 'i-00000001', - 'rootDeviceName': '/dev/sdb1', - 'rootDeviceType': 'ebs'} - - _expected_block_device_mapping0 = [ - {'deviceName': '/dev/sdb1', - 'ebs': {'status': 'in-use', - 
'deleteOnTermination': False, - 'volumeId': 2, - }}, - {'deviceName': '/dev/sdb2', - 'ebs': {'status': 'in-use', - 'deleteOnTermination': False, - 'volumeId': 3, - }}, - {'deviceName': '/dev/sdb3', - 'ebs': {'status': 'in-use', - 'deleteOnTermination': True, - 'volumeId': 5, - }}, - {'deviceName': '/dev/sdb4', - 'ebs': {'status': 'in-use', - 'deleteOnTermination': False, - 'volumeId': 7, - }}, - {'deviceName': '/dev/sdb5', - 'ebs': {'status': 'in-use', - 'deleteOnTermination': False, - 'volumeId': 9, - }}, - {'deviceName': '/dev/sdb6', - 'ebs': {'status': 'in-use', - 'deleteOnTermination': False, - 'volumeId': 11, }}] - # NOTE(yamahata): swap/ephemeral device case isn't supported yet. - - _expected_instance_bdm2 = { - 'instanceId': 'i-00000002', - 'rootDeviceName': '/dev/sdc1', - 'rootDeviceType': 'instance-store'} - - def test_format_instance_bdm(self): - (inst1, inst2, volumes) = self._setUpBlockDeviceMapping() - - result = {} - self.cloud._format_instance_bdm(self.context, inst1['id'], '/dev/sdb1', - result) - self.assertSubDictMatch( - {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']}, - result) - self._assertEqualBlockDeviceMapping( - self._expected_block_device_mapping0, result['blockDeviceMapping']) - - result = {} - self.cloud._format_instance_bdm(self.context, inst2['id'], '/dev/sdc1', - result) - self.assertSubDictMatch( - {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']}, - result) - - self._tearDownBlockDeviceMapping(inst1, inst2, volumes) - - def _assertInstance(self, instance_id): - ec2_instance_id = ec2utils.id_to_ec2_id(instance_id) - result = self.cloud.describe_instances(self.context, - instance_id=[ec2_instance_id]) - result = result['reservationSet'][0] - self.assertEqual(len(result['instancesSet']), 1) - result = result['instancesSet'][0] - self.assertEqual(result['instanceId'], ec2_instance_id) - return result - - def _assertEqualBlockDeviceMapping(self, expected, result): - self.assertEqual(len(expected), len(result)) - for x in expected: - found = False - for y in result: - if x['deviceName'] == y['deviceName']: - self.assertSubDictMatch(x, y) - found = True - break - self.assertTrue(found) - - def test_describe_instances_bdm(self): - """Make sure describe_instances works with root_device_name and - block device mappings - """ - (inst1, inst2, volumes) = self._setUpBlockDeviceMapping() - - result = self._assertInstance(inst1['id']) - self.assertSubDictMatch(self._expected_instance_bdm1, result) - self._assertEqualBlockDeviceMapping( - self._expected_block_device_mapping0, result['blockDeviceMapping']) - - result = self._assertInstance(inst2['id']) - self.assertSubDictMatch(self._expected_instance_bdm2, result) - - self._tearDownBlockDeviceMapping(inst1, inst2, volumes) - - def test_describe_images(self): - describe_images = self.cloud.describe_images - - def fake_detail(meh, context): - return [{'id': 1, 'container_format': 'ami', - 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}}] - - def fake_show_none(meh, context, id): - raise exception.ImageNotFound(image_id='bad_image_id') - - self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) - # list all - result1 = describe_images(self.context) - result1 = result1['imagesSet'][0] - self.assertEqual(result1['imageId'], 'ami-00000001') - # provided a valid image_id - result2 = describe_images(self.context, ['ami-00000001']) - self.assertEqual(1, len(result2['imagesSet'])) - # provide more than 1 valid image_id - result3 = describe_images(self.context, 
['ami-00000001', - 'ami-00000002']) - self.assertEqual(2, len(result3['imagesSet'])) - # provide an non-existing image_id - self.stubs.UnsetAll() - self.stubs.Set(fake._FakeImageService, 'show', fake_show_none) - self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none) - self.assertRaises(exception.ImageNotFound, describe_images, - self.context, ['ami-fake']) - - def assertDictListUnorderedMatch(self, L1, L2, key): - self.assertEqual(len(L1), len(L2)) - for d1 in L1: - self.assertTrue(key in d1) - for d2 in L2: - self.assertTrue(key in d2) - if d1[key] == d2[key]: - self.assertDictMatch(d1, d2) - - def _setUpImageSet(self, create_volumes_and_snapshots=False): - mappings1 = [ - {'device': '/dev/sda1', 'virtual': 'root'}, - - {'device': 'sdb0', 'virtual': 'ephemeral0'}, - {'device': 'sdb1', 'virtual': 'ephemeral1'}, - {'device': 'sdb2', 'virtual': 'ephemeral2'}, - {'device': 'sdb3', 'virtual': 'ephemeral3'}, - {'device': 'sdb4', 'virtual': 'ephemeral4'}, - - {'device': 'sdc0', 'virtual': 'swap'}, - {'device': 'sdc1', 'virtual': 'swap'}, - {'device': 'sdc2', 'virtual': 'swap'}, - {'device': 'sdc3', 'virtual': 'swap'}, - {'device': 'sdc4', 'virtual': 'swap'}] - block_device_mapping1 = [ - {'device_name': '/dev/sdb1', 'snapshot_id': 01234567}, - {'device_name': '/dev/sdb2', 'volume_id': 01234567}, - {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'}, - {'device_name': '/dev/sdb4', 'no_device': True}, - - {'device_name': '/dev/sdc1', 'snapshot_id': 12345678}, - {'device_name': '/dev/sdc2', 'volume_id': 12345678}, - {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'}, - {'device_name': '/dev/sdc4', 'no_device': True}] - image1 = { - 'id': 1, - 'properties': { - 'kernel_id': 1, - 'type': 'machine', - 'image_state': 'available', - 'mappings': mappings1, - 'block_device_mapping': block_device_mapping1, - } - } - - mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}] - block_device_mapping2 = [{'device_name': '/dev/sdb1', - 'snapshot_id': 01234567}] - image2 = { - 'id': 2, - 'properties': { - 'kernel_id': 2, - 'type': 'machine', - 'root_device_name': '/dev/sdb1', - 'mappings': mappings2, - 'block_device_mapping': block_device_mapping2}} - - def fake_show(meh, context, image_id): - for i in [image1, image2]: - if i['id'] == image_id: - return i - raise exception.ImageNotFound(image_id=image_id) - - def fake_detail(meh, context): - return [image1, image2] - - self.stubs.Set(fake._FakeImageService, 'show', fake_show) - self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) - - volumes = [] - snapshots = [] - if create_volumes_and_snapshots: - for bdm in block_device_mapping1: - if 'volume_id' in bdm: - vol = self._volume_create(bdm['volume_id']) - volumes.append(vol['id']) - if 'snapshot_id' in bdm: - snap = db.snapshot_create(self.context, - {'id': bdm['snapshot_id'], - 'volume_id': 76543210, - 'status': "available", - 'volume_size': 1}) - snapshots.append(snap['id']) - return (volumes, snapshots) - - def _assertImageSet(self, result, root_device_type, root_device_name): - self.assertEqual(1, len(result['imagesSet'])) - result = result['imagesSet'][0] - self.assertTrue('rootDeviceType' in result) - self.assertEqual(result['rootDeviceType'], root_device_type) - self.assertTrue('rootDeviceName' in result) - self.assertEqual(result['rootDeviceName'], root_device_name) - self.assertTrue('blockDeviceMapping' in result) - - return result - - _expected_root_device_name1 = '/dev/sda1' - # NOTE(yamahata): noDevice doesn't make sense when returning mapping - # It makes 
sense only when user overriding existing - # mapping. - _expected_bdms1 = [ - {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'}, - {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId': - 'snap-00053977'}}, - {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId': - 'vol-00053977'}}, - {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'}, - # {'deviceName': '/dev/sdb4', 'noDevice': True}, - - {'deviceName': '/dev/sdc0', 'virtualName': 'swap'}, - {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId': - 'snap-00bc614e'}}, - {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId': - 'vol-00bc614e'}}, - {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'}, - # {'deviceName': '/dev/sdc4', 'noDevice': True} - ] - - _expected_root_device_name2 = '/dev/sdb1' - _expected_bdms2 = [{'deviceName': '/dev/sdb1', - 'ebs': {'snapshotId': 'snap-00053977'}}] - - # NOTE(yamahata): - # InstanceBlockDeviceMappingItemType - # rootDeviceType - # rootDeviceName - # blockDeviceMapping - # deviceName - # virtualName - # ebs - # snapshotId - # volumeSize - # deleteOnTermination - # noDevice - def test_describe_image_mapping(self): - """test for rootDeviceName and blockDeiceMapping""" - describe_images = self.cloud.describe_images - self._setUpImageSet() - - result = describe_images(self.context, ['ami-00000001']) - result = self._assertImageSet(result, 'instance-store', - self._expected_root_device_name1) - - self.assertDictListUnorderedMatch(result['blockDeviceMapping'], - self._expected_bdms1, 'deviceName') - - result = describe_images(self.context, ['ami-00000002']) - result = self._assertImageSet(result, 'ebs', - self._expected_root_device_name2) - - self.assertDictListUnorderedMatch(result['blockDeviceMapping'], - self._expected_bdms2, 'deviceName') - - self.stubs.UnsetAll() - - def test_describe_image_attribute(self): - describe_image_attribute = self.cloud.describe_image_attribute - - def fake_show(meh, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'container_format': 'ami', - 'is_public': True} - - self.stubs.Set(fake._FakeImageService, 'show', fake_show) - self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) - result = describe_image_attribute(self.context, 'ami-00000001', - 'launchPermission') - self.assertEqual([{'group': 'all'}], result['launchPermission']) - - def test_describe_image_attribute_root_device_name(self): - describe_image_attribute = self.cloud.describe_image_attribute - self._setUpImageSet() - - result = describe_image_attribute(self.context, 'ami-00000001', - 'rootDeviceName') - self.assertEqual(result['rootDeviceName'], - self._expected_root_device_name1) - result = describe_image_attribute(self.context, 'ami-00000002', - 'rootDeviceName') - self.assertEqual(result['rootDeviceName'], - self._expected_root_device_name2) - - def test_describe_image_attribute_block_device_mapping(self): - describe_image_attribute = self.cloud.describe_image_attribute - self._setUpImageSet() - - result = describe_image_attribute(self.context, 'ami-00000001', - 'blockDeviceMapping') - self.assertDictListUnorderedMatch(result['blockDeviceMapping'], - self._expected_bdms1, 'deviceName') - result = describe_image_attribute(self.context, 'ami-00000002', - 'blockDeviceMapping') - self.assertDictListUnorderedMatch(result['blockDeviceMapping'], - self._expected_bdms2, 'deviceName') - - def test_modify_image_attribute(self): - modify_image_attribute = self.cloud.modify_image_attribute - - fake_metadata = {'id': 1, 'container_format': 'ami', - 'properties': 
{'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'is_public': False} - - def fake_show(meh, context, id): - return fake_metadata - - def fake_update(meh, context, image_id, metadata, data=None): - fake_metadata.update(metadata) - return fake_metadata - - self.stubs.Set(fake._FakeImageService, 'show', fake_show) - self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) - self.stubs.Set(fake._FakeImageService, 'update', fake_update) - result = modify_image_attribute(self.context, 'ami-00000001', - 'launchPermission', 'add', - user_group=['all']) - self.assertEqual(True, result['is_public']) - - def test_deregister_image(self): - deregister_image = self.cloud.deregister_image - - def fake_delete(self, context, id): - return None - - self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) - # valid image - result = deregister_image(self.context, 'ami-00000001') - self.assertEqual(result['imageId'], 'ami-00000001') - # invalid image - self.stubs.UnsetAll() - - def fake_detail_empty(self, context): - return [] - - self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty) - self.assertRaises(exception.ImageNotFound, deregister_image, - self.context, 'ami-bad001') - - def test_deregister_image_wrong_container_type(self): - deregister_image = self.cloud.deregister_image - - def fake_delete(self, context, id): - return None - - self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) - self.assertRaises(exception.NotFound, deregister_image, self.context, - 'aki-00000001') - - def _run_instance(self, **kwargs): - rv = self.cloud.run_instances(self.context, **kwargs) - instance_id = rv['instancesSet'][0]['instanceId'] - return instance_id - - def _run_instance_wait(self, **kwargs): - ec2_instance_id = self._run_instance(**kwargs) - self._wait_for_running(ec2_instance_id) - return ec2_instance_id - - def test_console_output(self): - instance_id = self._run_instance( - image_id='ami-1', - instance_type=FLAGS.default_instance_type, - max_count=1) - output = self.cloud.get_console_output(context=self.context, - instance_id=[instance_id]) - self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') - # TODO(soren): We need this until we can stop polling in the rpc code - # for unit tests. - rv = self.cloud.terminate_instances(self.context, [instance_id]) - - def test_ajax_console(self): - instance_id = self._run_instance(image_id='ami-1') - output = self.cloud.get_ajax_console(context=self.context, - instance_id=[instance_id]) - self.assertEquals(output['url'], - '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) - # TODO(soren): We need this until we can stop polling in the rpc code - # for unit tests. 
- rv = self.cloud.terminate_instances(self.context, [instance_id]) - - def test_key_generation(self): - result = self._create_key('test') - private_key = result['private_key'] - key = RSA.load_key_string(private_key, callback=lambda: None) - bio = BIO.MemoryBuffer() - public_key = db.key_pair_get(self.context, - self.context.user_id, - 'test')['public_key'] - key.save_pub_key_bio(bio) - converted = crypto.ssl_pub_to_ssh_pub(bio.read()) - # assert key fields are equal - self.assertEqual(public_key.split(" ")[1].strip(), - converted.split(" ")[1].strip()) - - def test_describe_key_pairs(self): - self._create_key('test1') - self._create_key('test2') - result = self.cloud.describe_key_pairs(self.context) - keys = result["keySet"] - self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) - self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) - - def test_import_public_key(self): - # test when user provides all values - result1 = self.cloud.import_public_key(self.context, - 'testimportkey1', - 'mytestpubkey', - 'mytestfprint') - self.assertTrue(result1) - keydata = db.key_pair_get(self.context, - self.context.user_id, - 'testimportkey1') - self.assertEqual('mytestpubkey', keydata['public_key']) - self.assertEqual('mytestfprint', keydata['fingerprint']) - # test when user omits fingerprint - pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key') - f = open(pubkey_path + '/dummy.pub', 'r') - dummypub = f.readline().rstrip() - f.close - f = open(pubkey_path + '/dummy.fingerprint', 'r') - dummyfprint = f.readline().rstrip() - f.close - result2 = self.cloud.import_public_key(self.context, - 'testimportkey2', - dummypub) - self.assertTrue(result2) - keydata = db.key_pair_get(self.context, - self.context.user_id, - 'testimportkey2') - self.assertEqual(dummypub, keydata['public_key']) - self.assertEqual(dummyfprint, keydata['fingerprint']) - - def test_delete_key_pair(self): - self._create_key('test') - self.cloud.delete_key_pair(self.context, 'test') - - def test_run_instances(self): - kwargs = {'image_id': FLAGS.default_image, - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1} - run_instances = self.cloud.run_instances - result = run_instances(self.context, **kwargs) - instance = result['instancesSet'][0] - self.assertEqual(instance['imageId'], 'ami-00000001') - self.assertEqual(instance['displayName'], 'Server 1') - self.assertEqual(instance['instanceId'], 'i-00000001') - self.assertEqual(instance['instanceState']['name'], 'running') - self.assertEqual(instance['instanceType'], 'm1.small') - - def test_run_instances_image_state_none(self): - kwargs = {'image_id': FLAGS.default_image, - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1} - run_instances = self.cloud.run_instances - - def fake_show_no_state(self, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'container_format': 'ami'} - - self.stubs.UnsetAll() - self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state) - self.assertRaises(exception.ApiError, run_instances, - self.context, **kwargs) - - def test_run_instances_image_state_invalid(self): - kwargs = {'image_id': FLAGS.default_image, - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1} - run_instances = self.cloud.run_instances - - def fake_show_decrypt(self, context, id): - return {'id': 1, 'container_format': 'ami', - 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine', 'image_state': 'decrypting'}} - - self.stubs.UnsetAll() - 
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt) - self.assertRaises(exception.ApiError, run_instances, - self.context, **kwargs) - - def test_run_instances_image_status_active(self): - kwargs = {'image_id': FLAGS.default_image, - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1} - run_instances = self.cloud.run_instances - - def fake_show_stat_active(self, context, id): - return {'id': 1, 'container_format': 'ami', - 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'status': 'active'} - - self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active) - - result = run_instances(self.context, **kwargs) - self.assertEqual(len(result['instancesSet']), 1) - - def test_terminate_instances(self): - inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_ref': 1, - 'host': 'host1'}) - terminate_instances = self.cloud.terminate_instances - # valid instance_id - result = terminate_instances(self.context, ['i-00000001']) - self.assertTrue(result) - # non-existing instance_id - self.assertRaises(exception.InstanceNotFound, terminate_instances, - self.context, ['i-2']) - db.instance_destroy(self.context, inst1['id']) - - def test_update_of_instance_display_fields(self): - inst = db.instance_create(self.context, {}) - ec2_id = ec2utils.id_to_ec2_id(inst['id']) - self.cloud.update_instance(self.context, ec2_id, - display_name='c00l 1m4g3') - inst = db.instance_get(self.context, inst['id']) - self.assertEqual('c00l 1m4g3', inst['display_name']) - db.instance_destroy(self.context, inst['id']) - - def test_update_of_instance_wont_update_private_fields(self): - inst = db.instance_create(self.context, {}) - host = inst['host'] - ec2_id = ec2utils.id_to_ec2_id(inst['id']) - self.cloud.update_instance(self.context, ec2_id, - display_name='c00l 1m4g3', - host='otherhost') - inst = db.instance_get(self.context, inst['id']) - self.assertEqual(host, inst['host']) - db.instance_destroy(self.context, inst['id']) - - def test_update_of_volume_display_fields(self): - vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, - ec2utils.id_to_ec2_vol_id(vol['id']), - display_name='c00l v0lum3') - vol = db.volume_get(self.context, vol['id']) - self.assertEqual('c00l v0lum3', vol['display_name']) - db.volume_destroy(self.context, vol['id']) - - def test_update_of_volume_wont_update_private_fields(self): - vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, - ec2utils.id_to_ec2_vol_id(vol['id']), - mountpoint='/not/here') - vol = db.volume_get(self.context, vol['id']) - self.assertEqual(None, vol['mountpoint']) - db.volume_destroy(self.context, vol['id']) - - def _restart_compute_service(self, periodic_interval=None): - """restart compute service. 
NOTE: fake driver forgets all instances.""" - self.compute.kill() - if periodic_interval: - self.compute = self.start_service( - 'compute', periodic_interval=periodic_interval) - else: - self.compute = self.start_service('compute') - - def _wait_for_state(self, ctxt, instance_id, predicate): - """Wait for a stopped instance to be a given state""" - id = ec2utils.ec2_id_to_id(instance_id) - while True: - info = self.cloud.compute_api.get(context=ctxt, instance_id=id) - LOG.debug(info) - if predicate(info): - break - greenthread.sleep(1) - - def _wait_for_running(self, instance_id): - def is_running(info): - vm_state = info["vm_state"] - task_state = info["task_state"] - return vm_state == vm_states.ACTIVE and task_state == None - self._wait_for_state(self.context, instance_id, is_running) - - def _wait_for_stopped(self, instance_id): - def is_stopped(info): - vm_state = info["vm_state"] - task_state = info["task_state"] - return vm_state == vm_states.STOPPED and task_state == None - self._wait_for_state(self.context, instance_id, is_stopped) - - def _wait_for_terminate(self, instance_id): - def is_deleted(info): - return info['deleted'] - elevated = self.context.elevated(read_deleted=True) - self._wait_for_state(elevated, instance_id, is_deleted) - - def test_stop_start_instance(self): - """Makes sure stop/start instance works""" - # enforce periodic tasks run in short time to avoid wait for 60s. - self._restart_compute_service(periodic_interval=0.3) - - kwargs = {'image_id': 'ami-1', - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1, } - instance_id = self._run_instance_wait(**kwargs) - - # a running instance can't be started. It is just ignored. - result = self.cloud.start_instances(self.context, [instance_id]) - greenthread.sleep(0.3) - self.assertTrue(result) - - result = self.cloud.stop_instances(self.context, [instance_id]) - greenthread.sleep(0.3) - self.assertTrue(result) - self._wait_for_stopped(instance_id) - - result = self.cloud.start_instances(self.context, [instance_id]) - greenthread.sleep(0.3) - self.assertTrue(result) - self._wait_for_running(instance_id) - - result = self.cloud.stop_instances(self.context, [instance_id]) - greenthread.sleep(0.3) - self.assertTrue(result) - self._wait_for_stopped(instance_id) - - result = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) - self.assertTrue(result) - - self._restart_compute_service() - - def _volume_create(self, volume_id=None): - kwargs = {'status': 'available', - 'host': self.volume.host, - 'size': 1, - 'attach_status': 'detached', } - if volume_id: - kwargs['id'] = volume_id - return db.volume_create(self.context, kwargs) - - def _assert_volume_attached(self, vol, instance_id, mountpoint): - self.assertEqual(vol['instance_id'], instance_id) - self.assertEqual(vol['mountpoint'], mountpoint) - self.assertEqual(vol['status'], "in-use") - self.assertEqual(vol['attach_status'], "attached") - - def _assert_volume_detached(self, vol): - self.assertEqual(vol['instance_id'], None) - self.assertEqual(vol['mountpoint'], None) - self.assertEqual(vol['status'], "available") - self.assertEqual(vol['attach_status'], "detached") - - def test_stop_start_with_volume(self): - """Make sure run instance with block device mapping works""" - - # enforce periodic tasks run in short time to avoid wait for 60s. 
- self._restart_compute_service(periodic_interval=0.3) - - vol1 = self._volume_create() - vol2 = self._volume_create() - kwargs = {'image_id': 'ami-1', - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1, - 'block_device_mapping': [{'device_name': '/dev/vdb', - 'volume_id': vol1['id'], - 'delete_on_termination': False}, - {'device_name': '/dev/vdc', - 'volume_id': vol2['id'], - 'delete_on_termination': True}, - ]} - ec2_instance_id = self._run_instance_wait(**kwargs) - instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) - - vols = db.volume_get_all_by_instance(self.context, instance_id) - self.assertEqual(len(vols), 2) - for vol in vols: - self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) - - vol = db.volume_get(self.context, vol1['id']) - self._assert_volume_attached(vol, instance_id, '/dev/vdb') - - vol = db.volume_get(self.context, vol2['id']) - self._assert_volume_attached(vol, instance_id, '/dev/vdc') - - result = self.cloud.stop_instances(self.context, [ec2_instance_id]) - self.assertTrue(result) - self._wait_for_stopped(ec2_instance_id) - - vol = db.volume_get(self.context, vol1['id']) - self._assert_volume_detached(vol) - vol = db.volume_get(self.context, vol2['id']) - self._assert_volume_detached(vol) - - self.cloud.start_instances(self.context, [ec2_instance_id]) - self._wait_for_running(ec2_instance_id) - vols = db.volume_get_all_by_instance(self.context, instance_id) - self.assertEqual(len(vols), 2) - for vol in vols: - self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) - self.assertTrue(vol['mountpoint'] == '/dev/vdb' or - vol['mountpoint'] == '/dev/vdc') - self.assertEqual(vol['instance_id'], instance_id) - self.assertEqual(vol['status'], "in-use") - self.assertEqual(vol['attach_status'], "attached") - - self.cloud.terminate_instances(self.context, [ec2_instance_id]) - greenthread.sleep(0.3) - - admin_ctxt = context.get_admin_context(read_deleted=False) - vol = db.volume_get(admin_ctxt, vol1['id']) - self.assertFalse(vol['deleted']) - db.volume_destroy(self.context, vol1['id']) - - greenthread.sleep(0.3) - admin_ctxt = context.get_admin_context(read_deleted=True) - vol = db.volume_get(admin_ctxt, vol2['id']) - self.assertTrue(vol['deleted']) - - self._restart_compute_service() - - def test_stop_with_attached_volume(self): - """Make sure attach info is reflected to block device mapping""" - # enforce periodic tasks run in short time to avoid wait for 60s. 
- self._restart_compute_service(periodic_interval=0.3) - - vol1 = self._volume_create() - vol2 = self._volume_create() - kwargs = {'image_id': 'ami-1', - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1, - 'block_device_mapping': [{'device_name': '/dev/vdb', - 'volume_id': vol1['id'], - 'delete_on_termination': True}]} - ec2_instance_id = self._run_instance_wait(**kwargs) - instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) - - vols = db.volume_get_all_by_instance(self.context, instance_id) - self.assertEqual(len(vols), 1) - for vol in vols: - self.assertEqual(vol['id'], vol1['id']) - self._assert_volume_attached(vol, instance_id, '/dev/vdb') - - vol = db.volume_get(self.context, vol2['id']) - self._assert_volume_detached(vol) - - self.cloud.compute_api.attach_volume(self.context, - instance_id=instance_id, - volume_id=vol2['id'], - device='/dev/vdc') - greenthread.sleep(0.3) - vol = db.volume_get(self.context, vol2['id']) - self._assert_volume_attached(vol, instance_id, '/dev/vdc') - - self.cloud.compute_api.detach_volume(self.context, - volume_id=vol1['id']) - greenthread.sleep(0.3) - vol = db.volume_get(self.context, vol1['id']) - self._assert_volume_detached(vol) - - result = self.cloud.stop_instances(self.context, [ec2_instance_id]) - self.assertTrue(result) - self._wait_for_stopped(ec2_instance_id) - - for vol_id in (vol1['id'], vol2['id']): - vol = db.volume_get(self.context, vol_id) - self._assert_volume_detached(vol) - - self.cloud.start_instances(self.context, [ec2_instance_id]) - self._wait_for_running(ec2_instance_id) - vols = db.volume_get_all_by_instance(self.context, instance_id) - self.assertEqual(len(vols), 1) - for vol in vols: - self.assertEqual(vol['id'], vol2['id']) - self._assert_volume_attached(vol, instance_id, '/dev/vdc') - - vol = db.volume_get(self.context, vol1['id']) - self._assert_volume_detached(vol) - - self.cloud.terminate_instances(self.context, [ec2_instance_id]) - greenthread.sleep(0.3) - - for vol_id in (vol1['id'], vol2['id']): - vol = db.volume_get(self.context, vol_id) - self.assertEqual(vol['id'], vol_id) - self._assert_volume_detached(vol) - db.volume_destroy(self.context, vol_id) - - self._restart_compute_service() - - def _create_snapshot(self, ec2_volume_id): - result = self.cloud.create_snapshot(self.context, - volume_id=ec2_volume_id) - greenthread.sleep(0.3) - return result['snapshotId'] - - def test_run_with_snapshot(self): - """Makes sure run/stop/start instance with snapshot works.""" - vol = self._volume_create() - ec2_volume_id = ec2utils.id_to_ec2_vol_id(vol['id']) - - ec2_snapshot1_id = self._create_snapshot(ec2_volume_id) - snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id) - ec2_snapshot2_id = self._create_snapshot(ec2_volume_id) - snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id) - - kwargs = {'image_id': 'ami-1', - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1, - 'block_device_mapping': [{'device_name': '/dev/vdb', - 'snapshot_id': snapshot1_id, - 'delete_on_termination': False, }, - {'device_name': '/dev/vdc', - 'snapshot_id': snapshot2_id, - 'delete_on_termination': True}]} - ec2_instance_id = self._run_instance_wait(**kwargs) - instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) - - vols = db.volume_get_all_by_instance(self.context, instance_id) - self.assertEqual(len(vols), 2) - vol1_id = None - vol2_id = None - for vol in vols: - snapshot_id = vol['snapshot_id'] - if snapshot_id == snapshot1_id: - vol1_id = vol['id'] - mountpoint = '/dev/vdb' - elif snapshot_id == snapshot2_id: - 
vol2_id = vol['id'] - mountpoint = '/dev/vdc' - else: - self.fail() - - self._assert_volume_attached(vol, instance_id, mountpoint) - - self.assertTrue(vol1_id) - self.assertTrue(vol2_id) - - self.cloud.terminate_instances(self.context, [ec2_instance_id]) - greenthread.sleep(0.3) - self._wait_for_terminate(ec2_instance_id) - - greenthread.sleep(0.3) - admin_ctxt = context.get_admin_context(read_deleted=False) - vol = db.volume_get(admin_ctxt, vol1_id) - self._assert_volume_detached(vol) - self.assertFalse(vol['deleted']) - db.volume_destroy(self.context, vol1_id) - - greenthread.sleep(0.3) - admin_ctxt = context.get_admin_context(read_deleted=True) - vol = db.volume_get(admin_ctxt, vol2_id) - self.assertTrue(vol['deleted']) - - for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id): - self.cloud.delete_snapshot(self.context, snapshot_id) - greenthread.sleep(0.3) - db.volume_destroy(self.context, vol['id']) - - def test_create_image(self): - """Make sure that CreateImage works""" - # enforce periodic tasks run in short time to avoid wait for 60s. - self._restart_compute_service(periodic_interval=0.3) - - (volumes, snapshots) = self._setUpImageSet( - create_volumes_and_snapshots=True) - - kwargs = {'image_id': 'ami-1', - 'instance_type': FLAGS.default_instance_type, - 'max_count': 1} - ec2_instance_id = self._run_instance_wait(**kwargs) - - # TODO(yamahata): s3._s3_create() can't be tested easily by unit test - # as there is no unit test for s3.create() - ## result = self.cloud.create_image(self.context, ec2_instance_id, - ## no_reboot=True) - ## ec2_image_id = result['imageId'] - ## created_image = self.cloud.describe_images(self.context, - ## [ec2_image_id]) - - self.cloud.terminate_instances(self.context, [ec2_instance_id]) - for vol in volumes: - db.volume_destroy(self.context, vol) - for snap in snapshots: - db.snapshot_destroy(self.context, snap) - # TODO(yamahata): clean up snapshot created by CreateImage. 
- - self._restart_compute_service() - - @staticmethod - def _fake_bdm_get(ctxt, id): - return [{'volume_id': 87654321, - 'snapshot_id': None, - 'no_device': None, - 'virtual_name': None, - 'delete_on_termination': True, - 'device_name': '/dev/sdh'}, - {'volume_id': None, - 'snapshot_id': 98765432, - 'no_device': None, - 'virtual_name': None, - 'delete_on_termination': True, - 'device_name': '/dev/sdi'}, - {'volume_id': None, - 'snapshot_id': None, - 'no_device': True, - 'virtual_name': None, - 'delete_on_termination': None, - 'device_name': None}, - {'volume_id': None, - 'snapshot_id': None, - 'no_device': None, - 'virtual_name': 'ephemeral0', - 'delete_on_termination': None, - 'device_name': '/dev/sdb'}, - {'volume_id': None, - 'snapshot_id': None, - 'no_device': None, - 'virtual_name': 'swap', - 'delete_on_termination': None, - 'device_name': '/dev/sdc'}, - {'volume_id': None, - 'snapshot_id': None, - 'no_device': None, - 'virtual_name': 'ephemeral1', - 'delete_on_termination': None, - 'device_name': '/dev/sdd'}, - {'volume_id': None, - 'snapshot_id': None, - 'no_device': None, - 'virtual_name': 'ephemeral2', - 'delete_on_termination': None, - 'device_name': '/dev/sd3'}, - ] - - def test_get_instance_mapping(self): - """Make sure that _get_instance_mapping works""" - ctxt = None - instance_ref0 = {'id': 0, - 'root_device_name': None} - instance_ref1 = {'id': 0, - 'root_device_name': '/dev/sda1'} - - self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', - self._fake_bdm_get) - - expected = {'ami': 'sda1', - 'root': '/dev/sda1', - 'ephemeral0': '/dev/sdb', - 'swap': '/dev/sdc', - 'ephemeral1': '/dev/sdd', - 'ephemeral2': '/dev/sd3'} - - self.assertEqual(self.cloud._format_instance_mapping(ctxt, - instance_ref0), - cloud._DEFAULT_MAPPINGS) - self.assertEqual(self.cloud._format_instance_mapping(ctxt, - instance_ref1), - expected) - - def test_describe_instance_attribute(self): - """Make sure that describe_instance_attribute works""" - self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', - self._fake_bdm_get) - - def fake_get(ctxt, instance_id): - return { - 'id': 0, - 'root_device_name': '/dev/sdh', - 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'vm_state': vm_states.STOPPED, - 'instance_type': {'name': 'fake_type'}, - 'kernel_id': 1, - 'ramdisk_id': 2, - 'user_data': 'fake-user data', - } - self.stubs.Set(self.cloud.compute_api, 'get', fake_get) - - def fake_volume_get(ctxt, volume_id, session=None): - if volume_id == 87654321: - return {'id': volume_id, - 'attach_time': '13:56:24', - 'status': 'in-use'} - raise exception.VolumeNotFound(volume_id=volume_id) - self.stubs.Set(db.api, 'volume_get', fake_volume_get) - - get_attribute = functools.partial( - self.cloud.describe_instance_attribute, - self.context, 'i-12345678') - - bdm = get_attribute('blockDeviceMapping') - bdm['blockDeviceMapping'].sort() - - expected_bdm = {'instance_id': 'i-12345678', - 'rootDeviceType': 'ebs', - 'blockDeviceMapping': [ - {'deviceName': '/dev/sdh', - 'ebs': {'status': 'in-use', - 'deleteOnTermination': True, - 'volumeId': 87654321, - 'attachTime': '13:56:24'}}]} - expected_bdm['blockDeviceMapping'].sort() - self.assertEqual(bdm, expected_bdm) - # NOTE(yamahata): this isn't supported - # get_attribute('disableApiTermination') - groupSet = get_attribute('groupSet') - groupSet['groupSet'].sort() - expected_groupSet = {'instance_id': 'i-12345678', - 'groupSet': [{'groupId': 'fake0'}, - {'groupId': 'fake1'}]} - expected_groupSet['groupSet'].sort() - self.assertEqual(groupSet, 
expected_groupSet) - self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), - {'instance_id': 'i-12345678', - 'instanceInitiatedShutdownBehavior': 'stopped'}) - self.assertEqual(get_attribute('instanceType'), - {'instance_id': 'i-12345678', - 'instanceType': 'fake_type'}) - self.assertEqual(get_attribute('kernel'), - {'instance_id': 'i-12345678', - 'kernel': 'aki-00000001'}) - self.assertEqual(get_attribute('ramdisk'), - {'instance_id': 'i-12345678', - 'ramdisk': 'ari-00000002'}) - self.assertEqual(get_attribute('rootDeviceName'), - {'instance_id': 'i-12345678', - 'rootDeviceName': '/dev/sdh'}) - # NOTE(yamahata): this isn't supported - # get_attribute('sourceDestCheck') - self.assertEqual(get_attribute('userData'), - {'instance_id': 'i-12345678', - 'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'}) -- cgit From a5c654884abee43760f4b43368f352272ae090dd Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Mon, 12 Sep 2011 18:23:46 -0400 Subject: fixing import --- nova/tests/test_direct.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 4ed0c2aa5..8d856dc4b 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -30,7 +30,7 @@ from nova import test from nova import volume from nova import utils from nova.api import direct -from nova.tests import test_cloud +from nova.tests.api.ec2 import test_cloud class ArbitraryObject(object): -- cgit From 837f611af4467c716a0585bb8f38345ceef32921 Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Mon, 12 Sep 2011 19:13:18 -0400 Subject: Made tests version version links more robust --- nova/tests/api/openstack/test_versions.py | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 686752509..0ad26135e 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -309,9 +309,8 @@ class VersionsTest(test.TestCase): self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') - self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.0/', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.0/') + self.assertEqual(f.feed.links[0]['rel'], 'self') self.assertEqual(len(f.entries), 1) entry = f.entries[0] @@ -354,9 +353,8 @@ class VersionsTest(test.TestCase): self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') - self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.1/', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(f.feed.links[0]['rel'], 'self') self.assertEqual(len(f.entries), 1) entry = f.entries[0] @@ -399,9 +397,8 @@ class VersionsTest(test.TestCase): self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') - self.assertEqual(f.feed.links, [{'href': 'http://localhost/', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/') + self.assertEqual(f.feed.links[0]['rel'], 'self') self.assertEqual(len(f.entries), 2) entry = f.entries[0] @@ -790,9 +787,9 @@ class VersionsSerializerTests(test.TestCase): self.assertEqual(f.feed.author, 'Rackspace') 
self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') - self.assertEqual(f.feed.links, [{'href': 'http://test/', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(f.feed.links[0]['href'], 'http://test/') + self.assertEqual(f.feed.links[0]['rel'], 'self') + self.assertEqual(len(f.entries), 1) entry = f.entries[0] self.assertEqual(entry.id, 'http://test/2.9.8') @@ -853,9 +850,9 @@ class VersionsSerializerTests(test.TestCase): self.assertEqual(f.feed.author, 'Rackspace') self.assertEqual(f.feed.author_detail.href, 'http://www.rackspace.com/') - self.assertEqual(f.feed.links, [{'href': 'http://localhost/v1.1/', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(f.feed.links[0]['rel'], 'self') + self.assertEqual(len(f.entries), 1) entry = f.entries[0] self.assertEqual(entry.id, 'http://localhost/v1.1/') -- cgit From 4c2f8b561390358cbd1c8dbfbc7e4d8370b2b84e Mon Sep 17 00:00:00 2001 From: Naveed Massjouni Date: Tue, 13 Sep 2011 02:45:11 -0400 Subject: Same as last time. --- nova/tests/api/openstack/test_versions.py | 109 +++++++++++++----------------- 1 file changed, 46 insertions(+), 63 deletions(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 0ad26135e..0077cbe79 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -320,24 +320,19 @@ class VersionsTest(test.TestCase): self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)') - self.assertEqual(entry.links, [ - { - 'href': 'http://localhost/v1.0/', - 'type': 'application/atom+xml', - 'rel': 'self' - }, - { - 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ - 'cs-devguide-20110125.pdf', - 'type': 'application/pdf', - 'rel': 'describedby' - }, - { - 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ - 'application.wadl', - 'type': 'application/vnd.sun.wadl+xml', - 'rel': 'describedby' - }]) + self.assertEqual(len(entry.links), 3) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.0/') + self.assertEqual(entry.links[0]['rel'], 'self') + self.assertEqual(entry.links[1], { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ + 'cs-devguide-20110125.pdf', + 'type': 'application/pdf', + 'rel': 'describedby'}) + self.assertEqual(entry.links[2], { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'\ + 'application.wadl', + 'type': 'application/vnd.sun.wadl+xml', + 'rel': 'describedby'}) def test_get_version_1_1_detail_atom(self): req = webob.Request.blank('/v1.1/') @@ -364,24 +359,19 @@ class VersionsTest(test.TestCase): self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') - self.assertEqual(entry.links, [ - { - 'href': 'http://localhost/v1.1/', - 'type': 'application/atom+xml', - 'rel': 'self' - }, - { - 'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'\ - 'cs-devguide-20110125.pdf', - 'type': 'application/pdf', - 'rel': 'describedby' - }, - { - 'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'\ - 'application.wadl', - 'type': 'application/vnd.sun.wadl+xml', - 'rel': 'describedby' - }]) + self.assertEqual(len(entry.links), 3) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(entry.links[0]['rel'], 'self') + self.assertEqual(entry.links[1], { + 'href': 
'http://docs.rackspacecloud.com/servers/api/v1.1/'\ + 'cs-devguide-20110125.pdf', + 'type': 'application/pdf', + 'rel': 'describedby'}) + self.assertEqual(entry.links[2], { + 'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'\ + 'application.wadl', + 'type': 'application/vnd.sun.wadl+xml', + 'rel': 'describedby'}) def test_get_version_list_atom(self): req = webob.Request.blank('/') @@ -408,10 +398,9 @@ class VersionsTest(test.TestCase): self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)') - self.assertEqual(entry.links, [{ - 'href': 'http://localhost/v1.0/', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(len(entry.links), 1) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.0/') + self.assertEqual(entry.links[0]['rel'], 'self') entry = f.entries[1] self.assertEqual(entry.id, 'http://localhost/v1.1/') self.assertEqual(entry.title, 'Version v1.1') @@ -798,10 +787,9 @@ class VersionsSerializerTests(test.TestCase): self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)') - self.assertEqual(entry.links, [{ - 'href': 'http://test/2.9.8', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(len(entry.links), 1) + self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8') + self.assertEqual(entry.links[0]['rel'], 'self') def test_version_detail_atom_serializer(self): versions_data = { @@ -861,22 +849,17 @@ class VersionsSerializerTests(test.TestCase): self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') - self.assertEqual(entry.links, [ - { - 'rel': 'self', - 'type': 'application/atom+xml', - 'href': 'http://localhost/v1.1/', - }, - { - 'rel': 'describedby', - 'type': 'application/pdf', - 'href': 'http://docs.rackspacecloud.com/' - 'servers/api/v1.1/cs-devguide-20110125.pdf', - }, - { - 'rel': 'describedby', - 'type': 'application/vnd.sun.wadl+xml', - 'href': 'http://docs.rackspacecloud.com/' - 'servers/api/v1.1/application.wadl', - }, - ]) + self.assertEqual(len(entry.links), 3) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(entry.links[0]['rel'], 'self') + self.assertEqual(entry.links[1], { + 'rel': 'describedby', + 'type': 'application/pdf', + 'href': 'http://docs.rackspacecloud.com/' + 'servers/api/v1.1/cs-devguide-20110125.pdf'}) + self.assertEqual(entry.links[2], { + 'rel': 'describedby', + 'type': 'application/vnd.sun.wadl+xml', + 'href': 'http://docs.rackspacecloud.com/' + 'servers/api/v1.1/application.wadl', + }) -- cgit From d379cdcacc2191f557fc378ec6c40e2642261467 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 13 Sep 2011 15:09:10 +0200 Subject: Update MANIFEST.in to match directory moves from rev1559 --- MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 883aba8a1..5451ace4b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -37,7 +37,7 @@ include nova/tests/bundle/1mb.manifest.xml include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml include nova/tests/bundle/1mb.part.0 include nova/tests/bundle/1mb.part.1 -include nova/tests/public_key/* +include nova/tests/api/ec2/public_key/* include nova/tests/db/nova.austin.sqlite include plugins/xenapi/README include plugins/xenapi/etc/xapi.d/plugins/objectstore -- cgit From 613bcfc45865c9d7c7577e124b187920970d031e Mon Sep 17 00:00:00 2001 
From: Naveed Massjouni Date: Tue, 13 Sep 2011 11:22:45 -0400 Subject: And again. --- nova/tests/api/openstack/test_versions.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 0077cbe79..f69dbd316 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -408,10 +408,9 @@ class VersionsTest(test.TestCase): self.assertEqual(len(entry.content), 1) self.assertEqual(entry.content[0].value, 'Version v1.1 CURRENT (2011-01-21T11:33:21Z)') - self.assertEqual(entry.links, [{ - 'href': 'http://localhost/v1.1/', - 'type': 'application/atom+xml', - 'rel': 'self'}]) + self.assertEqual(len(entry.links), 1) + self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/') + self.assertEqual(entry.links[0]['rel'], 'self') def test_multi_choice_image(self): req = webob.Request.blank('/images/1') -- cgit From 3f92fc99c23b0cc17f31ff9d988733abac98028a Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 13 Sep 2011 12:33:09 -0500 Subject: Inject hostname to xenstore upon creation. --- nova/virt/xenapi/vmops.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index fb9c602d9..15b942a82 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -253,6 +253,8 @@ class VMOps(object): self.create_vifs(vm_ref, instance, network_info) self.inject_network_info(instance, network_info, vm_ref) + self.inject_hostname(vm_ref, instance['hostname']) + return vm_ref def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref, @@ -1158,6 +1160,12 @@ class VMOps(object): resp = self._make_plugin_call('agent', 'resetnetwork', instance, '', args, vm_ref) + def inject_hostname(self, vm_ref, hostname): + """Inject the hostname of the instance into the xenstore.""" + logging.debug(_("injecting hostname to xs for vm: |%s|"), vm_ref) + self._session.call_xenapi_request("VM.add_to_xenstore_data", + (vm_ref, "vm-data/hostname", hostname)) + def list_from_xenstore(self, vm, path): """ Runs the xenstore-ls command to get a listing of all records -- cgit From e411fcd647e3cdcf415465288e527aecfd026fc5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Sep 2011 14:15:29 -0400 Subject: add additional data to flavor's ViewBuilder This adds missing fields that were trying to be used by python-novaclient. Previously, 'nova flavor-list' would have empty columns for fields other than 'disk' and 'ram'. Now all columns are filled in appropriately. 
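[Editorial note, not part of the patch: a rough, hypothetical sketch of the flavor "detail" entry the view builder returns once the extra fields below are exposed. The keys match the ones the diff adds ("vcpus", "swap", "rxtx_quota", "rxtx_cap"); the values are made up for illustration only.]

# Illustrative only -- approximate shape of a flavor detail entry after this
# change; values are hypothetical, keys mirror the patch.
example_flavor_detail = {
    "id": "1",                 # from the simple view
    "name": "256 MB Server",   # from the simple view
    "ram": 256,                # flavor_obj["memory_mb"]
    "disk": 10,                # flavor_obj["local_gb"]
    "vcpus": 1,                # newly exposed
    "swap": 0,                 # newly exposed
    "rxtx_quota": 0,           # newly exposed
    "rxtx_cap": 0,             # newly exposed
}
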
--- nova/api/openstack/views/flavors.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py index aea34b424..023acce0d 100644 --- a/nova/api/openstack/views/flavors.py +++ b/nova/api/openstack/views/flavors.py @@ -48,6 +48,10 @@ class ViewBuilder(object): detail = { "ram": flavor_obj["memory_mb"], "disk": flavor_obj["local_gb"], + "swap": flavor_obj["swap"], + "rxtx_quota": flavor_obj["rxtx_quota"], + "rxtx_cap": flavor_obj["rxtx_cap"], + "vcpus": flavor_obj["vcpus"], } detail.update(simple) -- cgit From 7d76c99526271077420e3f6a03e14bf1e037a3eb Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 13 Sep 2011 18:32:42 +0000 Subject: Test new content-types --- nova/tests/api/openstack/test_wsgi.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 6dea78d17..46e890cee 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -28,15 +28,14 @@ class RequestTest(test.TestCase): self.assertEqual(result, "application/json") def test_content_type_from_accept_xml(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") + for content_type in ('application/xml', + 'application/vnd.openstack.compute+xml', + 'application/json', + 'application/vnd.openstack.compute+json'): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = content_type + result = request.best_match_content_type() + self.assertEqual(result, content_type) request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" @@ -231,7 +230,9 @@ class ResponseSerializerTest(test.TestCase): self.body_serializers = { 'application/json': JSONSerializer(), - 'application/XML': XMLSerializer(), + 'application/vnd.openstack.compute+json': JSONSerializer(), + 'application/xml': XMLSerializer(), + 'application/vnd.openstack.compute+xml': XMLSerializer(), } self.serializer = wsgi.ResponseSerializer(self.body_serializers, @@ -281,7 +282,9 @@ class RequestDeserializerTest(test.TestCase): self.body_deserializers = { 'application/json': JSONDeserializer(), - 'application/XML': XMLDeserializer(), + 'application/vnd.openstack.compute+json': JSONDeserializer(), + 'application/xml': XMLDeserializer(), + 'application/vnd.openstack.compute+xml': XMLDeserializer(), } self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) -- cgit From 2b8463b664d450af045c1bd01accce4126933755 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 13 Sep 2011 18:33:09 +0000 Subject: Actually test expected matches received --- nova/tests/api/openstack/test_versions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 1269f13c9..b3412f97c 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -534,6 +534,8 @@ class VersionsTest(test.TestCase): """.replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, wsgi.XMLNS_ATOM) + self.assertEqual(expected, res.body) + def test_multi_choice_server_atom(self): """ 
Make sure multi choice responses do not have content-type -- cgit From b05121e1e1b2a8276d1dd21583c379dc4755b3dd Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 13 Sep 2011 15:48:10 -0400 Subject: Update test_libvirt so that flags and fakes are used instead of mocks for utils.import_class and utils.import_object. Fixes #lp849329. --- nova/tests/fake_network.py | 30 ++++++++++++++++ nova/tests/test_libvirt.py | 87 +++++++++++++++++----------------------------- 2 files changed, 62 insertions(+), 55 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 1ecb99b31..142206755 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -25,6 +25,36 @@ HOST = "testhost" FLAGS = flags.FLAGS +class FakeIptablesFirewallDriver(object): + def __init__(self, **kwargs): + pass + + def setattr(self, key, val): + self.__setattr__(key, val) + + def apply_instance_filter(self, instance, network_info): + pass + + +class FakeVIFDriver(object): + + def __init__(self, **kwargs): + pass + + def setattr(self, key, val): + self.__setattr__(key, val) + + def plug(self, instance, network, mapping): + return { + 'id': 'fake', + 'bridge_name': 'fake', + 'mac_address': 'fake', + 'ip_address': 'fake', + 'dhcp_server': 'fake', + 'extra_params': 'fake', + } + + class FakeModel(dict): """Represent a model from the db""" def __init__(self, *args, **kwargs): diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 233ee14de..8193d6ec2 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -51,6 +51,32 @@ def _concurrency(wait, done, target): done.send() +class FakeVirtDomain(object): + + def __init__(self, fake_xml=None): + if fake_xml: + self._fake_dom_xml = fake_xml + else: + self._fake_dom_xml=""" + + + + + + + + """ + + def snapshotCreateXML(self, *args): + return None + + def createWithFlags(self, launch_flags): + pass + + def XMLDesc(self, *args): + return self._fake_dom_xml + + class CacheConcurrencyTestCase(test.TestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() @@ -152,70 +178,23 @@ class LibvirtConnTestCase(test.TestCase): # A fake libvirt.virConnect class FakeLibvirtConnection(object): - pass - - # A fake connection.IptablesFirewallDriver - class FakeIptablesFirewallDriver(object): - - def __init__(self, **kwargs): - pass - - def setattr(self, key, val): - self.__setattr__(key, val) - - # A fake VIF driver - class FakeVIFDriver(object): - - def __init__(self, **kwargs): - pass - - def setattr(self, key, val): - self.__setattr__(key, val) - - def plug(self, instance, network, mapping): - return { - 'id': 'fake', - 'bridge_name': 'fake', - 'mac_address': 'fake', - 'ip_address': 'fake', - 'dhcp_server': 'fake', - 'extra_params': 'fake', - } + def defineXML(self, xml): + return FakeVirtDomain() # Creating mocks fake = FakeLibvirtConnection() - fakeip = FakeIptablesFirewallDriver - fakevif = FakeVIFDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) - # Inevitable mocks for connection.LibvirtConnection - self.mox.StubOutWithMock(connection.utils, 'import_class') - connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) - self.mox.StubOutWithMock(connection.utils, 'import_object') - connection.utils.import_object(mox.IgnoreArg()).AndReturn(fakevif) + self.flags(image_service='nova.image.fake.FakeImageService') + self.flags(firewall_driver="nova.tests.fake_network.FakeIptablesFirewallDriver") + 
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver") + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') connection.LibvirtConnection._conn = fake def fake_lookup(self, instance_name): - - class FakeVirtDomain(object): - - def snapshotCreateXML(self, *args): - return None - - def XMLDesc(self, *args): - return """ - - - - - - - - """ - return FakeVirtDomain() def fake_execute(self, *args): @@ -797,8 +776,6 @@ class LibvirtConnTestCase(test.TestCase): shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) shutil.rmtree(os.path.join(FLAGS.instances_path, '_base')) - self.assertTrue(count) - def test_get_host_ip_addr(self): conn = connection.LibvirtConnection(False) ip = conn.get_host_ip_addr() -- cgit From b815c3c6cadd72c496e087080a4a6652e0a5be72 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 13 Sep 2011 20:06:18 +0000 Subject: Remove debugging print --- nova/tests/api/openstack/test_wsgi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index 46e890cee..b06098da5 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -259,7 +259,6 @@ class ResponseSerializerTest(test.TestCase): def test_serialize_response_None(self): response = self.serializer.serialize(None, 'application/json') - print response self.assertEqual(response.headers['Content-Type'], 'application/json') self.assertEqual(response.body, '') self.assertEqual(response.status_int, 404) -- cgit From 9ced215b37dca65b3bf9cfe2d41518c2a563ff2d Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 13 Sep 2011 20:15:51 +0000 Subject: Split accept tests to better match the name of the test --- nova/tests/api/openstack/test_wsgi.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index b06098da5..b73027643 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -27,7 +27,7 @@ class RequestTest(test.TestCase): result = request.get_content_type() self.assertEqual(result, "application/json") - def test_content_type_from_accept_xml(self): + def test_content_type_from_accept(self): for content_type in ('application/xml', 'application/vnd.openstack.compute+xml', 'application/json', @@ -37,6 +37,7 @@ class RequestTest(test.TestCase): result = request.best_match_content_type() self.assertEqual(result, content_type) + def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() @@ -230,9 +231,7 @@ class ResponseSerializerTest(test.TestCase): self.body_serializers = { 'application/json': JSONSerializer(), - 'application/vnd.openstack.compute+json': JSONSerializer(), 'application/xml': XMLSerializer(), - 'application/vnd.openstack.compute+xml': XMLSerializer(), } self.serializer = wsgi.ResponseSerializer(self.body_serializers, @@ -281,9 +280,7 @@ class RequestDeserializerTest(test.TestCase): self.body_deserializers = { 'application/json': JSONDeserializer(), - 'application/vnd.openstack.compute+json': JSONDeserializer(), 'application/xml': XMLDeserializer(), - 'application/vnd.openstack.compute+xml': XMLDeserializer(), } self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) @@ -292,8 +289,9 @@ class RequestDeserializerTest(test.TestCase): pass def 
test_get_deserializer(self): - expected = self.deserializer.get_body_deserializer('application/json') - self.assertEqual(expected, self.body_deserializers['application/json']) + ctype = 'application/json' + expected = self.deserializer.get_body_deserializer(ctype) + self.assertEqual(expected, self.body_deserializers[ctype]) def test_get_deserializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, @@ -301,10 +299,11 @@ class RequestDeserializerTest(test.TestCase): 'application/unknown') def test_get_expected_content_type(self): + ctype = 'application/json' request = wsgi.Request.blank('/') - request.headers['Accept'] = 'application/json' + request.headers['Accept'] = ctype self.assertEqual(self.deserializer.get_expected_content_type(request), - 'application/json') + ctype) def test_get_action_args(self): env = { -- cgit From 98e2fd764b33fa5a3af6ca982a171717a12ee206 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 13 Sep 2011 15:33:34 -0500 Subject: Only allow up to 15 chars for a Windows hostname. --- nova/tests/test_xenapi.py | 3 ++- nova/virt/xenapi/vmops.py | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 4a83d139e..47c6a3c95 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -364,7 +364,7 @@ class XenAPIVMTestCase(test.TestCase): def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", - architecture="x86-64", instance_id=1, + hostname="test", architecture="x86-64", instance_id=1, check_injection=False, create_record=True, empty_dns=False): stubs.stubout_loopingcall_start(self.stubs) @@ -377,6 +377,7 @@ class XenAPIVMTestCase(test.TestCase): 'ramdisk_id': ramdisk_id, 'instance_type_id': instance_type_id, 'os_type': os_type, + 'hostname': hostname, 'architecture': architecture} instance = db.instance_create(self.context, values) else: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 15b942a82..6b56d668e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -253,7 +253,7 @@ class VMOps(object): self.create_vifs(vm_ref, instance, network_info) self.inject_network_info(instance, network_info, vm_ref) - self.inject_hostname(vm_ref, instance['hostname']) + self.inject_hostname(instance, vm_ref, instance['hostname']) return vm_ref @@ -1160,8 +1160,12 @@ class VMOps(object): resp = self._make_plugin_call('agent', 'resetnetwork', instance, '', args, vm_ref) - def inject_hostname(self, vm_ref, hostname): + def inject_hostname(self, instance, vm_ref, hostname): """Inject the hostname of the instance into the xenstore.""" + if instance.os_type == "windows": + # NOTE(jk0): Windows hostnames can only be <= 15 chars. 
+ hostname = hostname[:15] + logging.debug(_("injecting hostname to xs for vm: |%s|"), vm_ref) self._session.call_xenapi_request("VM.add_to_xenstore_data", (vm_ref, "vm-data/hostname", hostname)) -- cgit From 660706ec03a55e5875b1af875f1a5f157d4a04f1 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 13 Sep 2011 20:36:54 +0000 Subject: Test new vendor content types as well --- nova/tests/api/openstack/test_api.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index 7321c329f..b7a0b01ef 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -20,6 +20,7 @@ import json import webob.exc import webob.dec +from lxml import etree from webob import Request from nova import test @@ -52,6 +53,30 @@ class APITest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_vendor_content_type_json(self): + ctype = 'application/vnd.openstack.compute+json' + + req = webob.Request.blank('/') + req.headers['Accept'] = ctype + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, ctype) + + body = json.loads(res.body) + + def test_vendor_content_type_xml(self): + ctype = 'application/vnd.openstack.compute+xml' + + req = webob.Request.blank('/') + req.headers['Accept'] = ctype + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, ctype) + + body = etree.XML(res.body) + def test_exceptions_are_converted_to_faults(self): @webob.dec.wsgify -- cgit From 877d92845a5d2002c4adc0c8398469e66fd0907e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Sep 2011 17:06:15 -0400 Subject: make tests pass --- nova/api/openstack/views/flavors.py | 7 +++---- nova/tests/api/openstack/test_flavors.py | 24 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py index 023acce0d..4c5e2c1e6 100644 --- a/nova/api/openstack/views/flavors.py +++ b/nova/api/openstack/views/flavors.py @@ -48,12 +48,11 @@ class ViewBuilder(object): detail = { "ram": flavor_obj["memory_mb"], "disk": flavor_obj["local_gb"], - "swap": flavor_obj["swap"], - "rxtx_quota": flavor_obj["rxtx_quota"], - "rxtx_cap": flavor_obj["rxtx_cap"], - "vcpus": flavor_obj["vcpus"], } + for key in ( "vcpus", "swap", "rxtx_quota", "rxtx_cap"): + detail[key] = flavor_obj.get(key,"") + detail.update(simple) return detail diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 812bece42..b7a5d0e09 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -107,12 +107,20 @@ class FlavorsTest(test.TestCase): "name": "flavor 1", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", }, { "id": "2", "name": "flavor 2", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", }, ] self.assertEqual(flavors, expected) @@ -127,6 +135,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 12", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", } self.assertEqual(flavor, expected) @@ -149,6 +161,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 12", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + 
"vcpus": "", "links": [ { "rel": "self", @@ -216,6 +232,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 1", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -232,6 +252,10 @@ class FlavorsTest(test.TestCase): "name": "flavor 2", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", -- cgit From 147290d01389d72d3754bbaa088660f38a6871d8 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 13 Sep 2011 21:09:25 +0000 Subject: Fix mismerge --- nova/tests/api/openstack/test_versions.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 4f5024c87..f69dbd316 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -499,8 +499,6 @@ class VersionsTest(test.TestCase): self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v1.0/images/1'}])) - self.assertEqual(expected, res.body) - def test_multi_choice_server_atom(self): """ Make sure multi choice responses do not have content-type -- cgit From de94312a584a25bd70e0410e69aef34bf7c275d4 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 13 Sep 2011 14:32:24 -0700 Subject: deprecate aoe --- nova/volume/driver.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 35e3ea8d0..e5bb498ed 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -221,7 +221,14 @@ class VolumeDriver(object): class AOEDriver(VolumeDriver): - """Implements AOE specific volume commands.""" + """WARNING! Deprecated. This driver will be removed in Essex. Its use + is not recommended. 
+ + Implements AOE specific volume commands.""" + + def __init__(self, *args, **kwargs): + LOG.warn(_("AOEDriver is deprecated and will be removed in Essex")) + super(AOEDriver, self).__init__(*args, **kwargs) def ensure_export(self, context, volume): # NOTE(vish): we depend on vblade-persist for recreating exports -- cgit From de3d94726c980f40181693256c0f650d492451ef Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 13 Sep 2011 16:38:46 -0700 Subject: makes sure floating addresses are associated with host on associate so they come back --- nova/db/api.py | 8 +++++--- nova/db/sqlalchemy/api.py | 5 ++++- nova/network/manager.py | 3 ++- nova/tests/db/fakes.py | 4 +++- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index c03a86671..5b57b178e 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -261,11 +261,13 @@ def floating_ip_disassociate(context, address): return IMPL.floating_ip_disassociate(context, address) -def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): +def floating_ip_fixed_ip_associate(context, floating_address, + fixed_address, host): """Associate an floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, - fixed_address) + fixed_address, + host) def floating_ip_get_all(context): @@ -365,7 +367,7 @@ def fixed_ip_get_all(context): def fixed_ip_get_all_by_instance_host(context, host): """Get all allocated fixed ips filtered by instance host.""" - return IMPL.fixed_ip_get_all_instance_by_host(context, host) + return IMPL.fixed_ip_get_all_by_instance_host(context, host) def fixed_ip_get_by_address(context, address): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 523258841..9c3d8413e 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -529,7 +529,8 @@ def floating_ip_count_by_project(context, project_id): @require_context -def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): +def floating_ip_fixed_ip_associate(context, floating_address, + fixed_address, host): session = get_session() with session.begin(): # TODO(devcamcar): How to ensure floating_id belongs to user? 
@@ -540,6 +541,7 @@ def floating_ip_fixed_ip_associate(context, floating_address, fixed_address): fixed_address, session=session) floating_ip_ref.fixed_ip = fixed_ip_ref + floating_ip_ref.host = host floating_ip_ref.save(session=session) @@ -583,6 +585,7 @@ def floating_ip_disassociate(context, address): else: fixed_ip_address = None floating_ip_ref.fixed_ip = None + floating_ip_ref.host = None floating_ip_ref.save(session=session) return fixed_ip_address diff --git a/nova/network/manager.py b/nova/network/manager.py index 05d928fab..aa6711b48 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -289,7 +289,8 @@ class FloatingIP(object): self.db.floating_ip_fixed_ip_associate(context, floating_address, - fixed_address) + fixed_address, + self.host) self.driver.bind_floating_ip(floating_address) self.driver.ensure_floating_forward(floating_address, fixed_address) diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 19028a451..cdbfba63a 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -125,10 +125,11 @@ def stub_out_db_network_api(stubs): if ips[0]['fixed_ip']: fixed_ip_address = ips[0]['fixed_ip']['address'] ips[0]['fixed_ip'] = None + ips[0]['host'] = None return fixed_ip_address def fake_floating_ip_fixed_ip_associate(context, floating_address, - fixed_address): + fixed_address, host): float = filter(lambda i: i['address'] == floating_address, floating_ips) fixed = filter(lambda i: i['address'] == fixed_address, @@ -136,6 +137,7 @@ def stub_out_db_network_api(stubs): if float and fixed: float[0]['fixed_ip'] = fixed[0] float[0]['fixed_ip_id'] = fixed[0]['id'] + float[0]['host'] = host def fake_floating_ip_get_all_by_host(context, host): # TODO(jkoelker): Once we get the patches that remove host from -- cgit From a85a2c2e82fa8820b04f669c92a3500c7c6cebe2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 13 Sep 2011 20:38:26 -0400 Subject: pep8 fixes. --- nova/tests/test_libvirt.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 8193d6ec2..a30b00dbe 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -57,7 +57,7 @@ class FakeVirtDomain(object): if fake_xml: self._fake_dom_xml = fake_xml else: - self._fake_dom_xml=""" + self._fake_dom_xml = """ @@ -188,7 +188,8 @@ class LibvirtConnTestCase(test.TestCase): fake.__setattr__(key, val) self.flags(image_service='nova.image.fake.FakeImageService') - self.flags(firewall_driver="nova.tests.fake_network.FakeIptablesFirewallDriver") + fw_driver = "nova.tests.fake_network.FakeIptablesFirewallDriver" + self.flags(firewall_driver=fw_driver) self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver") self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') -- cgit From 4eb704e9024111c80f6f7c83810a08d7eec5c4af Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 13 Sep 2011 21:13:58 -0400 Subject: last of the api.openstack.test_images merge fixes. 
--- nova/tests/api/openstack/test_images.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 6890e0e9e..e054ffd03 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -821,7 +821,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }], }, { - 'id': 128, + 'id': '128', 'name': 'deleted snapshot', 'metadata': { u'instance_ref': u'http://localhost/v1.1/servers/42', @@ -832,7 +832,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'status': 'DELETED', 'progress': 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -852,7 +852,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }], }, { - 'id': 129, + 'id': '129', 'name': 'pending_delete snapshot', 'metadata': { u'instance_ref': u'http://localhost/v1.1/servers/42', @@ -863,7 +863,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'status': 'DELETED', 'progress': 0, 'server': { - 'id': 42, + 'id': '42', "links": [{ "rel": "self", "href": server_href, @@ -914,7 +914,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }], }, { - 'id': 132, + 'id': '132', 'name': None, 'metadata': {}, 'updated': self.NOW_API_FORMAT, @@ -1224,7 +1224,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): # Snapshots for User 1 server_ref = 'http://localhost/v1.1/servers/42' snapshot_properties = {'instance_ref': server_ref, 'user_id': 'fake'} - statuses = ('queued', 'saving', 'active','killed', + statuses = ('queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete') for status in statuses: add_fixture(id=image_id, name='%s snapshot' % status, -- cgit From 7a02394009aae85f722430682f8360371121504b Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 14 Sep 2011 11:33:36 -0400 Subject: Make tests pass. 
--- nova/tests/api/openstack/test_images.py | 38 ++++++++++++++++----------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 45ca73073..27ae8f2bd 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -77,14 +77,14 @@ class ImagesTest(test.TestCase): response_dict = json.loads(response.body) response_list = response_dict["images"] - expected = [{'id': '123', 'name': 'public image'}, - {'id': '124', 'name': 'queued snapshot'}, - {'id': '125', 'name': 'saving snapshot'}, - {'id': '126', 'name': 'active snapshot'}, - {'id': '127', 'name': 'killed snapshot'}, - {'id': '128', 'name': 'deleted snapshot'}, - {'id': '129', 'name': 'pending_delete snapshot'}, - {'id': '130', 'name': None}] + expected = [{'id': 123, 'name': 'public image'}, + {'id': 124, 'name': 'queued snapshot'}, + {'id': 125, 'name': 'saving snapshot'}, + {'id': 126, 'name': 'active snapshot'}, + {'id': 127, 'name': 'killed snapshot'}, + {'id': 128, 'name': 'deleted snapshot'}, + {'id': 129, 'name': 'pending_delete snapshot'}, + {'id': 130, 'name': None}] self.assertDictListMatch(response_list, expected) @@ -99,7 +99,7 @@ class ImagesTest(test.TestCase): expected_image = { "image": { - "id": "123", + "id": 123, "name": "public image", "updated": NOW_API_FORMAT, "created": NOW_API_FORMAT, @@ -406,7 +406,7 @@ class ImagesTest(test.TestCase): response_list = response_dict["images"] expected = [{ - 'id': '123', + 'id': 123, 'name': 'public image', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -414,7 +414,7 @@ class ImagesTest(test.TestCase): 'progress': 100, }, { - 'id': '124', + 'id': 124, 'name': 'queued snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -422,7 +422,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '125', + 'id': 125, 'name': 'saving snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -430,7 +430,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '126', + 'id': 126, 'name': 'active snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -438,7 +438,7 @@ class ImagesTest(test.TestCase): 'progress': 100, }, { - 'id': '127', + 'id': 127, 'name': 'killed snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -446,7 +446,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '128', + 'id': 128, 'name': 'deleted snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -454,7 +454,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '129', + 'id': 129, 'name': 'pending_delete snapshot', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -462,7 +462,7 @@ class ImagesTest(test.TestCase): 'progress': 0, }, { - 'id': '130', + 'id': 130, 'name': None, 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, @@ -914,7 +914,7 @@ class ImagesTest(test.TestCase): app = fakes.wsgi_app(fake_auth_context=self._get_fake_context()) res = req.get_response(app) image_meta = json.loads(res.body)['image'] - expected = {'id': '123', 'name': 'public image', + expected = {'id': 123, 'name': 'public image', 'updated': NOW_API_FORMAT, 'created': NOW_API_FORMAT, 'status': 'ACTIVE', 'progress': 100} @@ -934,7 +934,7 @@ class ImagesTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) image_meta = json.loads(response.body)['image'] - self.assertEqual('123', image_meta['serverId']) + self.assertEqual(123, 
image_meta['serverId']) self.assertEqual('Snapshot 1', image_meta['name']) def test_create_snapshot_no_name(self): -- cgit From d36e59f4480265741018a1fd5160f5262b7e9331 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 11:54:13 -0400 Subject: add extension description for FlavorExtraData --- nova/api/openstack/contrib/flavorextradata.py | 46 +++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 nova/api/openstack/contrib/flavorextradata.py diff --git a/nova/api/openstack/contrib/flavorextradata.py b/nova/api/openstack/contrib/flavorextradata.py new file mode 100644 index 000000000..d0554c7b4 --- /dev/null +++ b/nova/api/openstack/contrib/flavorextradata.py @@ -0,0 +1,46 @@ +# Copyright 2011 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Flavor extra data extension +Openstack API version 1.1 lists "name", "ram", "disk", "vcpus" as flavor +attributes. This extension adds to that list: + rxtx_cap + rxtx_quota + swap +""" + +from nova.api.openstack import extensions + + +class Flavorextradata(extensions.ExtensionDescriptor): + """The Flavor extra data extension for the OpenStack API.""" + + def get_name(self): + return "FlavorExtraData" + + def get_alias(self): + return "os-flavor-extra-data" + + def get_description(self): + return "Provide additional data for flavors" + + def get_namespace(self): + return "http://docs.openstack.org/ext/flavor_extra_data/api/v1.1" + + def get_updated(self): + return "2011-09-14T00:00:00+00:00" + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 -- cgit From efaa0e6590985e3d4bdeeb18e1d41a037856ea89 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 11:54:56 -0400 Subject: fix test_extensions test to know of new extension FlavorExtraData --- nova/tests/api/openstack/test_extensions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 31443242b..44f4eb055 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -87,6 +87,7 @@ class ExtensionControllerTest(test.TestCase): self.ext_list = [ "Createserverext", "FlavorExtraSpecs", + "FlavorExtraData", "Floating_ips", "Fox In Socks", "Hosts", -- cgit From c20228123c1774a1e2aa1b4ee7155a62336f5934 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 11:55:18 -0400 Subject: fix white space for pep8 --- nova/api/openstack/views/flavors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py index 4c5e2c1e6..def969a6c 100644 --- a/nova/api/openstack/views/flavors.py +++ b/nova/api/openstack/views/flavors.py @@ -50,8 +50,8 @@ class ViewBuilder(object): "disk": flavor_obj["local_gb"], } - for key in ( "vcpus", "swap", "rxtx_quota", "rxtx_cap"): - detail[key] = flavor_obj.get(key,"") + for key in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"): + detail[key] = flavor_obj.get(key, "") 
detail.update(simple) -- cgit From 9f83b51ae2afeb45ed9bdcb8c3b63ced78f8050e Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 14 Sep 2011 16:19:18 +0000 Subject: Remove debugging --- nova/api/openstack/wsgi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 6eb1953b4..c342f6267 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -29,7 +29,6 @@ class Request(webob.Request): Based on the query extension then the Accept header. """ - LOG.info('supported = %s' % repr(supported_content_types)) supported_content_types = supported_content_types or \ ('application/json', 'application/vnd.openstack.compute+json', -- cgit From 092ff28b9f141368aed0d719140212e5fc8652f8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 12:25:47 -0400 Subject: add attributes to xml api --- nova/api/openstack/flavors.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index fd36060da..c6c9f096f 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -90,6 +90,9 @@ class FlavorXMLSerializer(wsgi.XMLDictSerializer): flavor_node.setAttribute('ram', str(flavor['ram'])) flavor_node.setAttribute('disk', str(flavor['disk'])) + for attr in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"): + flavor_node.setAttribute(attr, str(flavor.get(attr,""))) + link_nodes = self._create_link_nodes(xml_doc, flavor['links']) for link_node in link_nodes: flavor_node.appendChild(link_node) -- cgit From 34a08d831418b934f4cceaae69dbf17d90ecd5e0 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 14 Sep 2011 12:32:00 -0400 Subject: removing toprettyxml --- nova/api/openstack/wsgi.py | 2 +- nova/tests/api/openstack/test_images.py | 16 ++++++---------- nova/tests/api/openstack/test_limits.py | 14 ++++++-------- 3 files changed, 13 insertions(+), 19 deletions(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index bdcadcb99..588fc030b 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -316,7 +316,7 @@ class XMLDictSerializer(DictSerializer): def to_xml_string(self, node, has_atom=False): self._add_xmlns(node, has_atom) - return node.toprettyxml(indent=' ', encoding='UTF-8') + return node.toxml('UTF-8') #NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2aee1bc14..32ae7eeec 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -224,12 +224,10 @@ class ImagesTest(test.TestCase): expected = minidom.parseString(""" - - Image not found. - + xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + Image not found. - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) actual = minidom.parseString(response.body.replace(" ", "")) @@ -261,12 +259,10 @@ class ImagesTest(test.TestCase): # because the element hasn't changed definition expected = minidom.parseString(""" - - Image not found. - + xmlns="http://docs.openstack.org/compute/api/v1.1"> + Image not found. 
- """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) actual = minidom.parseString(response.body.replace(" ", "")) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 3db57ee86..6f0210c27 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -174,12 +174,11 @@ class LimitsControllerV10Test(BaseLimitTestSuite): response = request.get_response(self.controller) expected = minidom.parseString(""" - + - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) body = minidom.parseString(response.body.replace(" ", "")) @@ -192,17 +191,16 @@ class LimitsControllerV10Test(BaseLimitTestSuite): response = request.get_response(self.controller) expected = minidom.parseString(""" - + + unit="MINUTE" value="10" verb="GET"/> + unit="HOUR" value="5" verb="POST"/> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) body = minidom.parseString(response.body.replace(" ", "")) self.assertEqual(expected.toxml(), body.toxml()) -- cgit From 95e06c5d88f5500805fb8d9505a4db61560bf8e1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 12:59:10 -0400 Subject: update variable name after merge: flavor_node -> flavor_elem --- nova/api/openstack/flavors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index dd77f1a05..a419b7ec2 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -94,7 +94,7 @@ class FlavorXMLSerializer(wsgi.XMLDictSerializer): flavor_elem.set('disk', str(flavor_dict['disk'])) for attr in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"): - flavor_node.setAttribute(attr, str(flavor.get(attr,""))) + flavor_elem.setAttribute(attr, str(flavor.get(attr,""))) for link in flavor_dict.get('links', []): elem = etree.SubElement(flavor_elem, -- cgit From f9cac7659a761fa32fac0153f8ecb0334149a486 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 13:10:23 -0400 Subject: make xml-api tests pass --- nova/tests/api/openstack/test_flavors.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index fa4e548ac..348042bfe 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -300,6 +300,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "asdf", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -327,6 +331,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "asdf", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -364,6 +372,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "asdf", "ram": 256, "disk": 10, + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -402,6 +414,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "flavor 23", "ram": "512", "disk": "20", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -417,6 +433,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "flavor 13", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -459,6 +479,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "flavor 23", "ram": 
"512", "disk": "20", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", @@ -474,6 +498,10 @@ class FlavorsXMLSerializationTest(test.TestCase): "name": "flavor 13", "ram": "256", "disk": "10", + "rxtx_cap": "", + "rxtx_quota": "", + "swap": "", + "vcpus": "", "links": [ { "rel": "self", -- cgit From 2b41dd235b50e3003a42e0b860c5915d06d86071 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 14 Sep 2011 17:23:40 +0000 Subject: Map vendor content types to their standard content type before serializing or deserializing. This is so we don't have to litter the code with both types when they are treated identically --- nova/api/openstack/wsgi.py | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index c342f6267..1c4724c8f 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -19,6 +19,21 @@ XMLNS_ATOM = 'http://www.w3.org/2005/Atom' LOG = logging.getLogger('nova.api.openstack.wsgi') +# The vendor content types should serialize identically to the non-vendor +# content types. So to avoid littering the code with both options, we +# map the vendor to the other when looking up the type +_CONTENT_TYPE_MAP = { + 'application/vnd.openstack.compute+json': 'application/json', + 'application/vnd.openstack.compute+xml': 'application/xml', +} + +_SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.compute+json', + 'application/xml', + 'application/vnd.openstack.compute+xml', +) + class Request(webob.Request): """Add some Openstack API-specific logic to the base webob.Request.""" @@ -30,10 +45,7 @@ class Request(webob.Request): """ supported_content_types = supported_content_types or \ - ('application/json', - 'application/vnd.openstack.compute+json', - 'application/xml', - 'application/vnd.openstack.compute+xml') + _SUPPORTED_CONTENT_TYPES parts = self.path.rsplit('.', 1) if len(parts) > 1: @@ -55,10 +67,7 @@ class Request(webob.Request): if not "Content-Type" in self.headers: return None - allowed_types = ('application/json', - 'application/vnd.openstack.compute+json', - 'application/xml', - 'application/vnd.openstack.compute+xml') + allowed_types = _SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: @@ -198,16 +207,11 @@ class RequestDeserializer(object): supported_content_types=None): self.supported_content_types = supported_content_types or \ - ('application/json', - 'application/vnd.openstack.compute+json', - 'application/xml', - 'application/vnd.openstack.compute+xml') + _SUPPORTED_CONTENT_TYPES self.body_deserializers = { 'application/xml': XMLDeserializer(), - 'application/vnd.openstack.compute+xml': XMLDeserializer(), 'application/json': JSONDeserializer(), - 'application/vnd.openstack.compute+json': JSONDeserializer(), } self.body_deserializers.update(body_deserializers or {}) @@ -261,6 +265,7 @@ class RequestDeserializer(object): def get_body_deserializer(self, content_type): try: + content_type = _CONTENT_TYPE_MAP.get(content_type, content_type) return self.body_deserializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -425,9 +430,7 @@ class ResponseSerializer(object): def __init__(self, body_serializers=None, headers_serializer=None): self.body_serializers = { 'application/xml': XMLDictSerializer(), - 'application/vnd.openstack.compute+xml': XMLDictSerializer(), 'application/json': 
JSONDictSerializer(), - 'application/vnd.openstack.compute+json': JSONDictSerializer(), } self.body_serializers.update(body_serializers or {}) @@ -452,6 +455,7 @@ class ResponseSerializer(object): def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type if data is not None: + content_type = _CONTENT_TYPE_MAP.get(content_type, content_type) serializer = self.get_body_serializer(content_type) response.body = serializer.serialize(data, action) -- cgit From 1b836a4159bd324572f71dab4abcef5a8d3292e5 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 14 Sep 2011 17:29:28 +0000 Subject: Add copyright --- nova/api/openstack/wsgi.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 1c4724c8f..c68a57cb5 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -1,3 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. import json from lxml import etree -- cgit From d678d8c4d024a4154ecd2ea77a42063ad1253364 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 14 Sep 2011 17:32:33 +0000 Subject: Remove unnecessary vendor content types now that they are mapped to standard content types automatically --- nova/api/openstack/versions.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index f4ae498b4..75a1d0ba4 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -101,8 +101,6 @@ class Versions(wsgi.Resource): body_serializers = { 'application/atom+xml': VersionsAtomSerializer(metadata=metadata), 'application/xml': VersionsXMLSerializer(metadata=metadata), - 'application/vnd.openstack.compute+xml': - VersionsXMLSerializer(metadata=metadata), } serializer = wsgi.ResponseSerializer( body_serializers=body_serializers, @@ -307,7 +305,6 @@ def create_resource(version='1.0'): body_serializers = { 'application/xml': VersionsXMLSerializer(), - 'application/vnd.openstack.compute+xml': VersionsXMLSerializer(), 'application/atom+xml': VersionsAtomSerializer(), } serializer = wsgi.ResponseSerializer(body_serializers) -- cgit From 4fa96895f1c32e09db31532886d67a675fe66208 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 14 Sep 2011 17:44:22 +0000 Subject: Test both content types for JSON and XML --- nova/tests/api/openstack/test_wsgi.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index b73027643..74b9ce853 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -250,11 +250,21 @@ class ResponseSerializerTest(test.TestCase): self.serializer.get_body_serializer, 'application/unknown') - def test_serialize_response(self): - response = self.serializer.serialize({}, 'application/json') - 
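
For context, a minimal sketch (not Nova source) of the vendor content-type mapping introduced in the wsgi.py commit above: vendor types are collapsed onto their standard equivalents before the serializer registry is consulted, so only the standard types need registered handlers. The get_body_handler function and the _SERIALIZERS dict are illustrative stand-ins for the real get_body_serializer/get_body_deserializer methods and their body_serializers/body_deserializers dicts.

_CONTENT_TYPE_MAP = {
    'application/vnd.openstack.compute+json': 'application/json',
    'application/vnd.openstack.compute+xml': 'application/xml',
}

_SERIALIZERS = {
    'application/json': 'JSON handler',
    'application/xml': 'XML handler',
}


def get_body_handler(content_type):
    # Map the vendor type onto its standard equivalent before the lookup.
    ctype = _CONTENT_TYPE_MAP.get(content_type, content_type)
    try:
        return _SERIALIZERS[ctype]
    except KeyError:
        # Report the content type the caller actually asked for, not the
        # mapped one -- the point of the follow-up "wrong content type" fix.
        raise ValueError('unsupported content type: %s' % content_type)


print(get_body_handler('application/vnd.openstack.compute+json'))  # JSON path
print(get_body_handler('application/xml'))                          # XML path
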
self.assertEqual(response.headers['Content-Type'], 'application/json') - self.assertEqual(response.body, 'pew_json') - self.assertEqual(response.status_int, 404) + def test_serialize_response_json(self): + for content_type in ('application/json', + 'application/vnd.openstack.compute+json'): + response = self.serializer.serialize({}, content_type) + self.assertEqual(response.headers['Content-Type'], content_type) + self.assertEqual(response.body, 'pew_json') + self.assertEqual(response.status_int, 404) + + def test_serialize_response_xml(self): + for content_type in ('application/xml', + 'application/vnd.openstack.compute+xml'): + response = self.serializer.serialize({}, content_type) + self.assertEqual(response.headers['Content-Type'], content_type) + self.assertEqual(response.body, 'pew_xml') + self.assertEqual(response.status_int, 404) def test_serialize_response_None(self): response = self.serializer.serialize(None, 'application/json') -- cgit From 8638db07c4ad2177249a70708969c7a1cba09037 Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 14 Sep 2011 17:55:15 +0000 Subject: Don't report the wrong content type if a mapped type doesn't exist --- nova/api/openstack/wsgi.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index c68a57cb5..fad516d4d 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -281,8 +281,8 @@ class RequestDeserializer(object): def get_body_deserializer(self, content_type): try: - content_type = _CONTENT_TYPE_MAP.get(content_type, content_type) - return self.body_deserializers[content_type] + ctype = _CONTENT_TYPE_MAP.get(content_type, content_type) + return self.body_deserializers[ctype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -471,13 +471,13 @@ class ResponseSerializer(object): def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type if data is not None: - content_type = _CONTENT_TYPE_MAP.get(content_type, content_type) serializer = self.get_body_serializer(content_type) response.body = serializer.serialize(data, action) def get_body_serializer(self, content_type): try: - return self.body_serializers[content_type] + ctype = _CONTENT_TYPE_MAP.get(content_type, content_type) + return self.body_serializers[ctype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) -- cgit From 9ce2e4f80f249d58622b7235aec55e823f9cd6c8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 14:37:32 -0400 Subject: flavor_elem.setAttribute -> flavor_elem.set, flavor -> flavor_dict --- nova/api/openstack/flavors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index a419b7ec2..0c435ebeb 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -94,7 +94,7 @@ class FlavorXMLSerializer(wsgi.XMLDictSerializer): flavor_elem.set('disk', str(flavor_dict['disk'])) for attr in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"): - flavor_elem.setAttribute(attr, str(flavor.get(attr,""))) + flavor_elem.set(attr, str(flavor_dict.get(attr,""))) for link in flavor_dict.get('links', []): elem = etree.SubElement(flavor_elem, -- cgit From 74a1045dd133fe708cb0f42bd4fae4198ee337ff Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Sep 2011 14:51:55 -0400 Subject: add necessary fields to flavor.rng schema --- 
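
For context, a rough sketch of the attribute-setting pattern from the flavors XML serializer commits above, written against the standard library's ElementTree rather than the lxml-based serializer and view builders Nova actually uses; serialize_flavor is an assumed name, not Nova's API.

from xml.etree import ElementTree as etree


def serialize_flavor(flavor_dict):
    flavor_elem = etree.Element('flavor')
    flavor_elem.set('id', str(flavor_dict['id']))
    flavor_elem.set('name', flavor_dict['name'])
    flavor_elem.set('ram', str(flavor_dict['ram']))
    flavor_elem.set('disk', str(flavor_dict['disk']))
    # The extra attributes default to "" so the element always carries them,
    # matching the empty-string values expected in test_flavors.py.
    for attr in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"):
        flavor_elem.set(attr, str(flavor_dict.get(attr, "")))
    return etree.tostring(flavor_elem)


print(serialize_flavor({'id': 1, 'name': 'asdf', 'ram': '256', 'disk': '10'}))
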
 nova/api/openstack/schemas/v1.1/flavor.rng | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/nova/api/openstack/schemas/v1.1/flavor.rng b/nova/api/openstack/schemas/v1.1/flavor.rng
index a00e4e9ee..6d3adc8dc 100644
--- a/nova/api/openstack/schemas/v1.1/flavor.rng
+++ b/nova/api/openstack/schemas/v1.1/flavor.rng
@@ -4,6 +4,10 @@
+
+
+
+
-- cgit

From aa0361c41d2e2feff18915ac93107727f52b15ca Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 14 Sep 2011 14:58:47 -0400
Subject: fix pep8 whitespace error

---
 nova/api/openstack/flavors.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index 0c435ebeb..8a310c900 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -94,7 +94,7 @@ class FlavorXMLSerializer(wsgi.XMLDictSerializer):
         flavor_elem.set('disk', str(flavor_dict['disk']))
 
         for attr in ("vcpus", "swap", "rxtx_quota", "rxtx_cap"):
-            flavor_elem.set(attr, str(flavor_dict.get(attr,"")))
+            flavor_elem.set(attr, str(flavor_dict.get(attr, "")))
 
         for link in flavor_dict.get('links', []):
             elem = etree.SubElement(flavor_elem,
-- cgit

From cecc822c0bc10c8d5cc5168329ae04172c6e609e Mon Sep 17 00:00:00 2001
From: Josh Durgin
Date: Wed, 14 Sep 2011 12:33:51 -0700
Subject: Use the correct method to get a builder.

---
 nova/api/openstack/contrib/volumes.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index d62225e58..9d4254f1f 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -372,8 +372,7 @@ class BootFromVolumeController(servers.ControllerV11):
         for key in ['instance_type', 'image_ref']:
             inst[key] = extra_values[key]
 
-        builder = self._get_view_builder(req)
-        server = builder.build(inst, is_detail=True)
+        server = self._build_view(req, inst, is_detail=True)
         server['server']['adminPass'] = extra_values['password']
         return server
-- cgit

From 6cd4e1dadda93d2e8fa4ed26f3e8d83ea22292d3 Mon Sep 17 00:00:00 2001
From: Josh Durgin
Date: Thu, 15 Sep 2011 11:05:57 -0700
Subject: Add a simple test for the OS boot from volume api. This would have detected lp:850305.

---
 nova/tests/api/openstack/contrib/test_volumes.py | 73 ++++++++++++++++++++++++
 1 file changed, 73 insertions(+)
 create mode 100644 nova/tests/api/openstack/contrib/test_volumes.py

diff --git a/nova/tests/api/openstack/contrib/test_volumes.py b/nova/tests/api/openstack/contrib/test_volumes.py
new file mode 100644
index 000000000..443ec399f
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_volumes.py
@@ -0,0 +1,73 @@
+# Copyright 2011 Josh Durgin
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import json
+import webob
+
+import nova
+from nova import context
+from nova import test
+from nova.api.openstack.contrib.volumes import BootFromVolumeController
+from nova.compute import instance_types
+from nova.tests.api.openstack import fakes
+from nova.tests.api.openstack.test_servers import fake_gen_uuid
+
+
+def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs):
+    inst_type = instance_types.get_instance_type_by_flavor_id(2)
+    return [{'id': 1,
+             'display_name': 'test_server',
+             'uuid': fake_gen_uuid(),
+             'instance_type': dict(inst_type),
+             'access_ip_v4': '1.2.3.4',
+             'access_ip_v6': 'fead::1234',
+             'image_ref': 3,
+             'user_id': 'fake',
+             'project_id': 'fake',
+             'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+             'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
+             }]
+
+
+class BootFromVolumeTest(test.TestCase):
+
+    def setUp(self):
+        super(BootFromVolumeTest, self).setUp()
+        self.stubs.Set(nova.compute.API, 'create', fake_compute_api_create)
+
+    def test_create_root_volume(self):
+        body = dict(server=dict(
+            name='test_server', imageRef=3,
+            flavorRef=2, min_count=1, max_count=1,
+            block_device_mapping=[dict(
+                volume_id=1,
+                device_name='/dev/vda',
+                virtual='root',
+                delete_on_termination=False,
+                )]
+            ))
+        req = webob.Request.blank('/v1.1/fake/os-volumes_boot')
+        req.method = 'POST'
+        req.body = json.dumps(body)
+        req.headers['content-type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 200)
+        server = json.loads(res.body)['server']
+        self.assertEqual(1, server['id'])
+        self.assertEqual(2, int(server['flavor']['id']))
+        self.assertEqual(u'test_server', server['name'])
+        self.assertEqual(3, int(server['image']['id']))
+        self.assertEqual(16, len(server['adminPass']))
-- cgit

From 19736d4991ff55f300fddd0ab48af40a081f2272 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Thu, 15 Sep 2011 11:27:16 -0700
Subject: delete the internal libvirt snapshot after it is saved to glance

---
 nova/virt/libvirt/connection.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 363a20ed0..47e43a7c2 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -464,6 +464,7 @@ class LibvirtConnection(driver.ComputeDriver):
 
         # Clean up
         shutil.rmtree(temp_dir)
+        snapshot_ptr.delete(0)
 
     @exception.wrap_exception()
     def reboot(self, instance, network_info):
-- cgit

From caf0e0f10167fd31ea16a56d471380537a6ee03a Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Thu, 15 Sep 2011 16:58:22 -0500
Subject: NoMoreFixedIps now subclasses NovaException instead of Error

---
 nova/exception.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/exception.py b/nova/exception.py
index a3cbb98cf..4f25d3721 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -506,7 +506,7 @@ class FixedIpInvalid(Invalid):
     message = _("Fixed IP address %(address)s is invalid.")
 
 
-class NoMoreFixedIps(Error):
+class NoMoreFixedIps(NovaException):
     message = _("Zero fixed ips available.")
-- cgit
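
For context, a simplified sketch (not nova/exception.py itself) of the message-template behaviour that NovaException provides, which is the likely motivation for the NoMoreFixedIps change above: subclasses declare a message format string and callers pass keyword arguments to fill it. Logging, i18n via _(), and the real fallback behaviour are omitted here.

class NovaException(Exception):
    """Simplified stand-in; the real base class also logs and wraps _()."""
    message = "An unknown exception occurred."

    def __init__(self, **kwargs):
        try:
            msg = self.message % kwargs
        except (KeyError, TypeError):
            # Assumed fallback when format arguments are missing or wrong.
            msg = self.message
        super(NovaException, self).__init__(msg)


class NoMoreFixedIps(NovaException):
    message = "Zero fixed ips available."


class FixedIpInvalid(NovaException):
    message = "Fixed IP address %(address)s is invalid."


print(NoMoreFixedIps())                    # Zero fixed ips available.
print(FixedIpInvalid(address='10.0.0.9'))  # Fixed IP address 10.0.0.9 is invalid.
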