From 4887331a0d972b4d9e1fbd365c36edf1d7b076ec Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 2 Aug 2011 15:10:09 -0400 Subject: fix pylint errors --- nova/tests/scheduler/test_scheduler.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 6a56a57db..a515636a3 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -51,10 +51,10 @@ FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' class TestDriver(driver.Scheduler): """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): + def schedule(self, context, topic, *args, **kwargs): return 'fallback_host' - def schedule_named_method(context, topic, num): + def schedule_named_method(self, context, topic, num): return 'named_host' @@ -301,7 +301,7 @@ class SimpleDriverTestCase(test.TestCase): db.compute_node_create(self.context, dic) return db.service_get(self.context, s_ref['id']) - def test_doesnt_report_disabled_hosts_as_up(self): + def test_doesnt_report_disabled_hosts_as_up_no_queue(self): """Ensures driver doesn't find hosts before they are enabled""" # NOTE(vish): constructing service without create method # because we are going to use it without queue @@ -324,7 +324,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() - def test_reports_enabled_hosts_as_up(self): + def test_reports_enabled_hosts_as_up_no_queue(self): """Ensures driver can find the hosts that are up""" # NOTE(vish): constructing service without create method # because we are going to use it without queue @@ -343,7 +343,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() - def test_least_busy_host_gets_instance(self): + def test_least_busy_host_gets_instance_no_queue(self): """Ensures the host with less cores gets the next one""" compute1 = service.Service('host1', 'nova-compute', @@ 
-366,7 +366,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() - def test_specific_host_gets_instance(self): + def test_specific_host_gets_instance_no_queue(self): """Ensures if you set availability_zone it launches on that zone""" compute1 = service.Service('host1', 'nova-compute', @@ -389,7 +389,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() - def test_wont_sechedule_if_specified_host_is_down(self): + def test_wont_sechedule_if_specified_host_is_down_no_queue(self): compute1 = service.Service('host1', 'nova-compute', 'compute', @@ -408,7 +408,7 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id2) compute1.kill() - def test_will_schedule_on_disabled_host_if_specified(self): + def test_will_schedule_on_disabled_host_if_specified_no_queue(self): compute1 = service.Service('host1', 'nova-compute', 'compute', @@ -423,7 +423,7 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id2) compute1.kill() - def test_too_many_cores(self): + def test_too_many_cores_no_queue(self): """Ensures we don't go over max cores""" compute1 = service.Service('host1', 'nova-compute', @@ -456,7 +456,7 @@ class SimpleDriverTestCase(test.TestCase): compute1.kill() compute2.kill() - def test_least_busy_host_gets_volume(self): + def test_least_busy_host_gets_volume_no_queue(self): """Ensures the host with less gigabytes gets the next one""" volume1 = service.Service('host1', 'nova-volume', @@ -477,7 +477,7 @@ class SimpleDriverTestCase(test.TestCase): volume1.delete_volume(self.context, volume_id1) db.volume_destroy(self.context, volume_id2) - def test_doesnt_report_disabled_hosts_as_up(self): + def test_doesnt_report_disabled_hosts_as_up2(self): """Ensures driver doesn't find hosts before they are enabled""" compute1 = self.start_service('compute', host='host1') compute2 = self.start_service('compute', host='host2') @@ -993,7 +993,7 @@ class 
ZoneRedirectTest(test.TestCase): decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND) try: result = decorator(go_boom)(None, None, 1) - self.assertFail(_("Should have rerouted.")) + self.fail(_("Should have rerouted.")) except api.RedirectResult, e: self.assertEquals(e.results['magic'], 'found me') @@ -1081,10 +1081,10 @@ class DynamicNovaClientTest(test.TestCase): class FakeZonesProxy(object): - def do_something(*args, **kwargs): + def do_something(self, *args, **kwargs): return 42 - def raises_exception(*args, **kwargs): + def raises_exception(self, *args, **kwargs): raise Exception('testing') -- cgit From 378571665538fb5c4667a9d068678378ddf8526f Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 2 Aug 2011 15:19:00 -0400 Subject: fix pylint errors --- nova/db/sqlalchemy/api.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 36614df1f..ad935fa44 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3202,9 +3202,8 @@ def instance_metadata_update_or_create(context, instance_id, metadata): meta_ref = None for key, value in metadata.iteritems(): try: - meta_ref = instance_metadata_get_item(context, instance_id, key, - session) - except: + meta_ref = instance_metadata_get_item(context, instance_id, key) + except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() meta_ref.update({"key": key, "value": value, "instance_id": instance_id, @@ -3322,11 +3321,9 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, spec_ref = None for key, value in specs.iteritems(): try: - spec_ref = instance_type_extra_specs_get_item(context, - instance_type_id, - key, - session) - except: + spec_ref = instance_type_extra_specs_get_item( + context, instance_type_id, key) + except exception.InstanceTypeExtraSpecsNotFound, e: spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": 
value, "instance_type_id": instance_type_id, -- cgit From d1891d2dd18a14535ec22a0363fd8234a01dbb8c Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 2 Aug 2011 15:30:27 -0400 Subject: remove unused parameter --- nova/virt/hyperv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index c26fe108b..315f635c7 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -373,7 +373,7 @@ class HyperVConnection(driver.ComputeDriver): raise exception.InstanceNotFound(instance_id=instance.id) self._set_vm_state(instance.name, 'Reboot') - def destroy(self, instance, network_info): + def destroy(self, instance): """Destroy the VM. Also destroy the associated VHD disk files""" LOG.debug(_("Got request to destroy vm %s"), instance.name) vm = self._lookup(instance.name) -- cgit From 614895b6c93904888aab99d1507d94271d763c04 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 2 Aug 2011 16:08:17 -0400 Subject: fix missing method call and add failing test --- nova/tests/test_xenapi.py | 9 +++++++++ nova/tests/xenapi/stubs.py | 4 ++++ nova/virt/xenapi_conn.py | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 6b7d5df76..9032c82ab 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -669,6 +669,14 @@ class XenAPIVMTestCase(test.TestCase): self.conn.spawn(instance, network_info) return instance + def test_revert_migration(self): + instance = self._create_instance() + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + stubs.stubout_loopingcall_start(self.stubs) + conn = xenapi_conn.get_connection(False) + conn.revert_migration(instance) + + class XenAPIDiffieHellmanTestCase(test.TestCase): """Unit tests for Diffie-Hellman code.""" @@ -842,6 +850,7 @@ class XenAPIMigrateInstance(test.TestCase): network_info, resize_instance=False) + class XenAPIImageTypeTestCase(test.TestCase): """Test ImageType 
class.""" diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 66c79d465..195e4d038 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -230,10 +230,14 @@ def stub_out_vm_methods(stubs): def fake_spawn_rescue(self, inst): inst._rescue = False + def fake_revert_migration(self, inst): + pass + stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown) stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock) stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock) stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue) + stubs.Set(vmops.VMOps, "revert_migration", fake_revert_migration) class FakeSessionForVolumeTests(fake.SessionBase): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a3d0abf9f..042bf2b02 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -190,7 +190,7 @@ class XenAPIConnection(driver.ComputeDriver): def revert_migration(self, instance): """Reverts a resize, powering back on the instance""" - self._vmops.revert_resize(instance) + self._vmops.revert_migration(instance) def finish_migration(self, instance, disk_info, network_info, resize_instance=False): -- cgit From b5a4a8d6cc68fec2dab205c5da494bce69241c33 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 2 Aug 2011 17:05:10 -0400 Subject: fix pep8 complaints --- nova/tests/test_xenapi.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 9032c82ab..8e3c88f8e 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -677,7 +677,6 @@ class XenAPIVMTestCase(test.TestCase): conn.revert_migration(instance) - class XenAPIDiffieHellmanTestCase(test.TestCase): """Unit tests for Diffie-Hellman code.""" def setUp(self): @@ -850,7 +849,6 @@ class XenAPIMigrateInstance(test.TestCase): network_info, resize_instance=False) - class XenAPIImageTypeTestCase(test.TestCase): """Test ImageType class.""" -- cgit From 
9bac11d9ff9933460f7ddf1bd1dd77d4d3397e47 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 2 Aug 2011 17:05:21 -0400 Subject: fix failing tests --- nova/db/sqlalchemy/api.py | 17 +++++++++++------ nova/tests/scheduler/test_scheduler.py | 4 ++-- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ad935fa44..60220cd68 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3177,8 +3177,9 @@ def instance_metadata_delete_all(context, instance_id): @require_context @require_instance_exists -def instance_metadata_get_item(context, instance_id, key): - session = get_session() +def instance_metadata_get_item(context, instance_id, key, session=None): + if not session: + session = get_session() meta_result = session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ @@ -3202,7 +3203,8 @@ def instance_metadata_update_or_create(context, instance_id, metadata): meta_ref = None for key, value in metadata.iteritems(): try: - meta_ref = instance_metadata_get_item(context, instance_id, key) + meta_ref = instance_metadata_get_item( + context, instance_id, key, session) except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() meta_ref.update({"key": key, "value": value, @@ -3298,8 +3300,11 @@ def instance_type_extra_specs_delete(context, instance_type_id, key): @require_context -def instance_type_extra_specs_get_item(context, instance_type_id, key): - session = get_session() +def instance_type_extra_specs_get_item(context, instance_type_id, key, + session=None): + + if not session: + session = get_session() spec_result = session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ @@ -3322,7 +3327,7 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, for key, value in specs.iteritems(): try: spec_ref = instance_type_extra_specs_get_item( - context, instance_type_id, key) + context, 
instance_type_id, key, session) except exception.InstanceTypeExtraSpecsNotFound, e: spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index a515636a3..0d7634d0d 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -51,10 +51,10 @@ FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' class TestDriver(driver.Scheduler): """Scheduler Driver for Tests""" - def schedule(self, context, topic, *args, **kwargs): + def schedule(context, topic, *args, **kwargs): return 'fallback_host' - def schedule_named_method(self, context, topic, num): + def schedule_named_method(context, topic, num): return 'named_host' -- cgit From 8774c69c4a8d97ab2dac32c0f52e6963543a46f1 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 2 Aug 2011 17:11:53 -0400 Subject: raise correct error --- nova/scheduler/zone_aware_scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index d99d7214c..8440421e1 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -264,7 +264,7 @@ class ZoneAwareScheduler(driver.Scheduler): """ if topic != "compute": - raise NotImplemented(_("Zone Aware Scheduler only understands " + raise NotImplementedError(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) num_instances = request_spec.get('num_instances', 1) -- cgit From e34a4c2feb7b2abef806ed720e1533e2e0fb94ef Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 14:05:58 -0400 Subject: remove bit-rotted code. 
fixes #820062 --- bin/nova-import-canonical-imagestore | 110 ----------------------------------- 1 file changed, 110 deletions(-) delete mode 100755 bin/nova-import-canonical-imagestore diff --git a/bin/nova-import-canonical-imagestore b/bin/nova-import-canonical-imagestore deleted file mode 100755 index 404ae37f4..000000000 --- a/bin/nova-import-canonical-imagestore +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" - Download images from Canonical Image Store -""" - -import gettext -import json -import os -import tempfile -import shutil -import subprocess -import sys -import urllib2 - -# If ../nova/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) - -gettext.install('nova', unicode=1) - -from nova import flags -from nova import log as logging -from nova import utils -from nova.objectstore import image - -FLAGS = flags.FLAGS - -API_URL = 'https://imagestore.canonical.com/api/dashboard' - - -def get_images(): - """Get a list of the images from the imagestore URL.""" - images = json.load(urllib2.urlopen(API_URL))['images'] - images = [img for img in images if img['title'].find('amd64') > -1] - return images - - -def download(img): - """Download an image to the local filesystem.""" - # FIXME(ja): add checksum/signature checks - tempdir = tempfile.mkdtemp(prefix='cis-') - - kernel_id = None - ramdisk_id = None - - for f in img['files']: - if f['kind'] == 'kernel': - dest = os.path.join(tempdir, 'kernel') - subprocess.call(['curl', '--fail', f['url'], '-o', dest]) - kernel_id = image.Image.add(dest, - description='kernel/' + img['title'], kernel=True) - - for f in img['files']: - if f['kind'] == 'ramdisk': - dest = os.path.join(tempdir, 'ramdisk') - subprocess.call(['curl', '--fail', f['url'], '-o', dest]) - ramdisk_id = image.Image.add(dest, - description='ramdisk/' + img['title'], ramdisk=True) - - for f in img['files']: - if f['kind'] == 'image': - dest = os.path.join(tempdir, 'image') - subprocess.call(['curl', '--fail', f['url'], '-o', dest]) - ramdisk_id = image.Image.add(dest, - description=img['title'], kernel=kernel_id, ramdisk=ramdisk_id) - - shutil.rmtree(tempdir) - - -def main(): - """Main entry point.""" - utils.default_flagfile() - argv = FLAGS(sys.argv) - logging.setup() - images = get_images() - - if len(argv) == 2: - for img in images: - if argv[1] == 'all' or argv[1] == img['title']: - download(img) - else: - print 'usage: %s (title|all)' - print 'available images:' - for img in 
images: - print img['title'] - -if __name__ == '__main__': - main() -- cgit From 916e0ce0997bdf3135684865eff6fadcda95752b Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 14:13:53 -0400 Subject: remove unused imports --- nova/api/openstack/create_instance_helper.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 53e814cd5..e4b897cd6 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -14,8 +14,6 @@ # under the License. import base64 -import re -import webob from webob import exc from xml.dom import minidom -- cgit From 91e16e057f083d1a0b8dffaa00b5979c12c23edc Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 14:30:45 -0400 Subject: fix potential runtime exception The exception could occur if a client were to create an APIRouter object. The fix relies on more established OOP patterns. --- nova/api/openstack/__init__.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 868b98a31..8d7f7a405 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -81,7 +81,10 @@ class APIRouter(base_wsgi.Router): self._setup_routes(mapper) super(APIRouter, self).__init__(mapper) - def _setup_routes(self, mapper, version): + def _setup_routes(self, mapper): + raise NotImplementedError("you must implement _setup_routes") + + def _setup_base_routes(self, mapper, version): """Routes common to all versions.""" server_members = self.server_members @@ -147,7 +150,7 @@ class APIRouterV10(APIRouter): """Define routes specific to OpenStack API V1.0.""" def _setup_routes(self, mapper): - super(APIRouterV10, self)._setup_routes(mapper, '1.0') + self._setup_base_routes(mapper, '1.0') mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, @@ -163,7 +166,7 
@@ class APIRouterV11(APIRouter): """Define routes specific to OpenStack API V1.1.""" def _setup_routes(self, mapper): - super(APIRouterV11, self)._setup_routes(mapper, '1.1') + self._setup_base_routes(mapper, '1.1') image_metadata_controller = image_metadata.create_resource() mapper.resource("image_meta", "metadata", controller=image_metadata_controller, -- cgit From a049e682b54525d2814f20b08ca64320040675b8 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 14:46:02 -0400 Subject: fix undefined variable error --- nova/scheduler/least_cost.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 8c400d476..329107efe 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -96,7 +96,8 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): cost_fn_str=cost_fn_str) try: - weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__) + flag_name = "%s_weight" % cost_fn.__name__ + weight = getattr(FLAGS, flag_name) except AttributeError: raise exception.SchedulerWeightFlagNotFound( flag_name=flag_name) -- cgit From 933587d821797037bb765e9d0bd5ea063c07785b Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 14:47:35 -0400 Subject: fix duplicate function name --- nova/tests/test_instance_types_extra_specs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py index 393ed1e36..205601277 100644 --- a/nova/tests/test_instance_types_extra_specs.py +++ b/nova/tests/test_instance_types_extra_specs.py @@ -136,7 +136,7 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): "m1.small") self.assertEquals(instance_type['extra_specs'], {}) - def test_instance_type_get_with_extra_specs(self): + def test_instance_type_get_by_flavor_id_with_extra_specs(self): instance_type = db.api.instance_type_get_by_flavor_id( 
context.get_admin_context(), 105) -- cgit From 98307324d7b1cce2f7312d1195c9674f0e0323b6 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 14:49:14 -0400 Subject: use correct exception name --- nova/virt/vmwareapi/vif.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index b3e43b209..fb6548b34 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -63,7 +63,7 @@ class VMWareVlanBridgeDriver(VIFDriver): vswitch_associated = network_utils.get_vswitch_for_vlan_interface( session, vlan_interface) if vswitch_associated is None: - raise exception.SwicthNotFoundForNetworkAdapter( + raise exception.SwitchNotFoundForNetworkAdapter( adapter=vlan_interface) # Check whether bridge already exists and retrieve the the ref of the # network whose name_label is "bridge" -- cgit From 4aada43cc0d1a3cb32094d6d6be1eb662f01d063 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 14:51:47 -0400 Subject: use an existing exception --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 60ef0df43..8a715af35 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -794,7 +794,7 @@ def get_vdi_for_vm_safely(session, vm_ref): else: num_vdis = len(vdi_refs) if num_vdis != 1: - raise exception.Exception(_("Unexpected number of VDIs" + raise exception.Error(_("Unexpected number of VDIs" "(%(num_vdis)s) found" " for VM %(vm_ref)s") % locals()) -- cgit From 6203ae5d5b285d62bd2c1bf1f2f11d3b64b53511 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 15:05:57 -0400 Subject: pep8 fixes --- nova/scheduler/zone_aware_scheduler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 8440421e1..297922570 100644 --- 
a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -264,8 +264,8 @@ class ZoneAwareScheduler(driver.Scheduler): """ if topic != "compute": - raise NotImplementedError(_("Zone Aware Scheduler only understands " - "Compute nodes (for now)")) + raise NotImplementedError(_("Zone Aware Scheduler only understands" + " Compute nodes (for now)")) num_instances = request_spec.get('num_instances', 1) instance_type = request_spec['instance_type'] -- cgit From a5add8b30c96ad1d83eebcd9756b0f358c9cb725 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 15:16:38 -0400 Subject: follow convention when raising exceptions --- nova/api/openstack/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 8d7f7a405..0af39c2df 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -82,7 +82,7 @@ class APIRouter(base_wsgi.Router): super(APIRouter, self).__init__(mapper) def _setup_routes(self, mapper): - raise NotImplementedError("you must implement _setup_routes") + raise NotImplementedError(_("You must implement _setup_routes.")) def _setup_base_routes(self, mapper, version): """Routes common to all versions.""" -- cgit From e7cb0c70384fc247a7f330ed7c4db5a3b0de814c Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 16:45:46 -0400 Subject: align multi-line string --- nova/scheduler/zone_aware_scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 297922570..9ac0f94ed 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -265,7 +265,7 @@ class ZoneAwareScheduler(driver.Scheduler): if topic != "compute": raise NotImplementedError(_("Zone Aware Scheduler only understands" - " Compute nodes (for now)")) + " Compute nodes (for now)")) num_instances = 
request_spec.get('num_instances', 1) instance_type = request_spec['instance_type'] -- cgit From 8b0ebd90edb47be344b355334ae16d6b7715087d Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 17:02:45 -0400 Subject: all subclasses of ComputeDriver should fully implement the interface of the destroy method. --- nova/virt/fake.py | 2 +- nova/virt/hyperv.py | 2 +- nova/virt/vmwareapi_conn.py | 2 +- nova/virt/xenapi_conn.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 2898f23a4..b7396d61c 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -293,7 +293,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def destroy(self, instance, network_info): + def destroy(self, instance, network_info, cleanup=True): key = instance.name if key in self.instances: del self.instances[key] diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 315f635c7..1e4cce10c 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -373,7 +373,7 @@ class HyperVConnection(driver.ComputeDriver): raise exception.InstanceNotFound(instance_id=instance.id) self._set_vm_state(instance.name, 'Reboot') - def destroy(self, instance): + def destroy(self, instance, network_info, cleanup=True): """Destroy the VM. 
Also destroy the associated VHD disk files""" LOG.debug(_("Got request to destroy vm %s"), instance.name) vm = self._lookup(instance.name) diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py index ce57847b2..10d80c0a0 100644 --- a/nova/virt/vmwareapi_conn.py +++ b/nova/virt/vmwareapi_conn.py @@ -136,7 +136,7 @@ class VMWareESXConnection(driver.ComputeDriver): """Reboot VM instance.""" self._vmops.reboot(instance, network_info) - def destroy(self, instance, network_info): + def destroy(self, instance, network_info, cleanup=True): """Destroy VM instance.""" self._vmops.destroy(instance, network_info) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 042bf2b02..571638496 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -216,7 +216,7 @@ class XenAPIConnection(driver.ComputeDriver): """ self._vmops.inject_file(instance, b64_path, b64_contents) - def destroy(self, instance, network_info): + def destroy(self, instance, network_info, cleanup=True): """Destroy VM instance""" self._vmops.destroy(instance, network_info) -- cgit From 634fe881223a7ea8e04b3054b39724207153be5b Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 3 Aug 2011 15:03:34 -0700 Subject: Initial version --- nova/api/openstack/contrib/security_groups.py | 489 +++++++++++++ nova/api/openstack/extensions.py | 11 +- nova/db/api.py | 5 + nova/exception.py | 4 + .../api/openstack/contrib/test_security_groups.py | 761 +++++++++++++++++++++ nova/tests/api/openstack/test_extensions.py | 6 +- 6 files changed, 1271 insertions(+), 5 deletions(-) create mode 100644 nova/api/openstack/contrib/security_groups.py create mode 100644 nova/tests/api/openstack/contrib/test_security_groups.py diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py new file mode 100644 index 000000000..39f1959e0 --- /dev/null +++ b/nova/api/openstack/contrib/security_groups.py @@ -0,0 +1,489 @@ +# Copyright 2011 OpenStack LLC. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The security groups extension.""" + +import netaddr +import urllib +from webob import exc +import webob + +from nova import compute +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import wsgi + + +from xml.dom import minidom + + +LOG = logging.getLogger("nova.api.contrib.security_groups") +FLAGS = flags.FLAGS + + +class SecurityGroupController(object): + """The Security group API controller for the OpenStack API.""" + + def __init__(self): + self.compute_api = compute.API() + super(SecurityGroupController, self).__init__() + + def _format_security_group_rule(self, context, rule): + r = {} + r['id'] = rule.id + r['parent_group_id'] = rule.parent_group_id + r['ip_protocol'] = rule.protocol + r['from_port'] = rule.from_port + r['to_port'] = rule.to_port + r['group'] = {} + r['ip_range'] = {} + if rule.group_id: + source_group = db.security_group_get(context, rule.group_id) + r['group'] = {'name': source_group.name, + 'tenant_id': source_group.project_id} + else: + r['ip_range'] = {'cidr': rule.cidr} + return r + + def _format_security_group(self, context, group): + g = {} + g['id'] = group.id + g['description'] = group.description + g['name'] = group.name + g['tenant_id'] = group.project_id + g['rules'] = [] + for rule in 
group.rules: + r = self._format_security_group_rule(context, rule) + g['rules'] += [r] + return g + + def show(self, req, id): + """Return data about the given security group.""" + context = req.environ['nova.context'] + try: + id = int(id) + security_group = db.security_group_get(context, id) + except ValueError: + msg = _("Security group id is not integer") + return exc.HTTPBadRequest(explanation=msg) + except exception.NotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + + return {'security_group': self._format_security_group(context, + security_group)} + + def delete(self, req, id): + """Delete a security group.""" + context = req.environ['nova.context'] + try: + id = int(id) + security_group = db.security_group_get(context, id) + except ValueError: + msg = _("Security group id is not integer") + return exc.HTTPBadRequest(explanation=msg) + except exception.NotFound as exp: + return exc.HTTPNotFound(explanation=unicode(exp)) + + LOG.audit(_("Delete security group %s"), id, context=context) + db.security_group_destroy(context, security_group.id) + + return exc.HTTPAccepted() + + def index(self, req): + """Returns a list of security groups""" + context = req.environ['nova.context'] + + self.compute_api.ensure_default_security_group(context) + if context.is_admin: + groups = db.security_group_get_all(context) + else: + groups = db.security_group_get_by_project(context, + context.project_id) + + limited_list = common.limited(groups, req) + result = [self._format_security_group(context, group) + for group in limited_list] + + return {'security_groups': + list(sorted(result, + key=lambda k: (k['tenant_id'], k['name'])))} + + def create(self, req, body): + """Creates a new security group.""" + context = req.environ['nova.context'] + if not body: + return exc.HTTPUnprocessableEntity() + + security_group = body.get('security_group', None) + + if security_group is None: + return exc.HTTPUnprocessableEntity() + + group_name = security_group.get('name', 
None) + group_description = security_group.get('description', None) + + self._validate_security_group_name(group_name) + self._validate_security_group_description(group_description) + + LOG.audit(_("Create Security Group %s"), group_name, context=context) + self.compute_api.ensure_default_security_group(context) + if db.security_group_exists(context, context.project_id, group_name): + msg = _('Security group %s already exists') % group_name + raise exc.HTTPBadRequest(explanation=msg) + + group = {'user_id': context.user_id, + 'project_id': context.project_id, + 'name': group_name, + 'description': group_description} + group_ref = db.security_group_create(context, group) + + return {'security_group': self._format_security_group(context, + group_ref)} + + def _validate_security_group_name(self, value): + if value is None: + msg = _("Security group name is mandatory") + raise exc.HTTPBadRequest(explanation=msg) + + if not isinstance(value, basestring): + msg = _("Security group name is not a string or unicode") + raise exc.HTTPBadRequest(explanation=msg) + + if value.strip() == '': + msg = _("Security group name is an empty string") + raise exc.HTTPBadRequest(explanation=msg) + + if len(value.strip()) > 255: + msg = _("Security group name should not be greater " + "than 255 characters") + raise exc.HTTPBadRequest(explanation=msg) + + def _validate_security_group_description(self, value): + if value is None: + msg = _("Security group description is mandatory") + raise exc.HTTPBadRequest(explanation=msg) + + if not isinstance(value, basestring): + msg = _("Security group description is not a string or unicode") + raise exc.HTTPBadRequest(explanation=msg) + + if value.strip() == '': + msg = _("Security group description is an empty string") + raise exc.HTTPBadRequest(explanation=msg) + + if len(value.strip()) > 255: + msg = _("Security group description should not be " + "greater than 255 characters") + raise exc.HTTPBadRequest(explanation=msg) + + +class 
SecurityGroupRulesController(SecurityGroupController): + + def create(self, req, body): + context = req.environ['nova.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + if not 'security_group_rule' in body: + raise exc.HTTPUnprocessableEntity() + + self.compute_api.ensure_default_security_group(context) + + sg_rule = body['security_group_rule'] + parent_group_id = sg_rule.get('parent_group_id', None) + try: + parent_group_id = int(parent_group_id) + security_group = db.security_group_get(context, parent_group_id) + except ValueError: + msg = _("Parent group id is not integer") + return exc.HTTPBadRequest(explanation=msg) + except exception.NotFound as exp: + msg = _("Security group (%s) not found") % parent_group_id + return exc.HTTPNotFound(explanation=msg) + + msg = "Authorize security group ingress %s" + LOG.audit(_(msg), security_group['name'], context=context) + + try: + values = self._rule_args_to_dict(context, + to_port=sg_rule.get('to_port'), + from_port=sg_rule.get('from_port'), + parent_group_id=sg_rule.get('parent_group_id'), + ip_protocol=sg_rule.get('ip_protocol'), + cidr=sg_rule.get('cidr'), + group_id=sg_rule.get('group_id')) + except Exception as exp: + raise exc.HTTPBadRequest(explanation=unicode(exp)) + + if values is None: + msg = _("Not enough parameters to build a " + "valid rule.") + raise exc.HTTPBadRequest(explanation=msg) + + values['parent_group_id'] = security_group.id + + if self._security_group_rule_exists(security_group, values): + msg = _('This rule already exists in group %s') % parent_group_id + raise exc.HTTPBadRequest(explanation=msg) + + security_group_rule = db.security_group_rule_create(context, values) + + self.compute_api.trigger_security_group_rules_refresh(context, + security_group_id=security_group['id']) + + return {'security_group_rule': self._format_security_group_rule( + context, + security_group_rule)} + + def _security_group_rule_exists(self, security_group, values): + """Indicates whether the 
specified rule values are already + defined in the given security group. + """ + for rule in security_group.rules: + if 'group_id' in values: + if rule['group_id'] == values['group_id']: + return True + else: + is_duplicate = True + for key in ('cidr', 'from_port', 'to_port', 'protocol'): + if rule[key] != values[key]: + is_duplicate = False + break + if is_duplicate: + return True + return False + + def _rule_args_to_dict(self, context, to_port=None, from_port=None, + parent_group_id=None, ip_protocol=None, + cidr=None, group_id=None): + values = {} + + if group_id: + try: + parent_group_id = int(parent_group_id) + group_id = int(group_id) + except ValueError: + msg = _("Parent or group id is not integer") + raise exception.InvalidInput(reason=msg) + + if parent_group_id == group_id: + msg = _("Parent group id and group id cannot be same") + raise exception.InvalidInput(reason=msg) + + values['group_id'] = group_id + #check if groupId exists + db.security_group_get(context, group_id) + elif cidr: + # If this fails, it throws an exception. This is what we want. 
+ try: + cidr = urllib.unquote(cidr).decode() + netaddr.IPNetwork(cidr) + except Exception: + raise exception.InvalidCidr(cidr=cidr) + values['cidr'] = cidr + else: + values['cidr'] = '0.0.0.0/0' + + if ip_protocol and from_port and to_port: + + try: + from_port = int(from_port) + to_port = int(to_port) + except ValueError: + raise exception.InvalidPortRange(from_port=from_port, + to_port=to_port) + ip_protocol = str(ip_protocol) + if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: + raise exception.InvalidIpProtocol(protocol=ip_protocol) + if ((min(from_port, to_port) < -1) or + (max(from_port, to_port) > 65535)): + raise exception.InvalidPortRange(from_port=from_port, + to_port=to_port) + + values['protocol'] = ip_protocol + values['from_port'] = from_port + values['to_port'] = to_port + else: + # If cidr based filtering, protocol and ports are mandatory + if 'cidr' in values: + return None + + return values + + def delete(self, req, id): + context = req.environ['nova.context'] + + self.compute_api.ensure_default_security_group(context) + try: + id = int(id) + rule = db.security_group_rule_get(context, id) + except ValueError: + msg = _("Rule id is not integer") + return exc.HTTPBadRequest(explanation=msg) + except exception.NotFound as exp: + msg = _("Rule (%s) not found") % id + return exc.HTTPNotFound(explanation=msg) + + group_id = rule.parent_group_id + self.compute_api.ensure_default_security_group(context) + + security_group = db.security_group_get(context, group_id) + if not security_group: + raise exception.SecurityGroupNotFound(security_group_id=group_id) + + msg = _("Revoke security group ingress %s") + LOG.audit(_(msg), security_group['name'], context=context) + + db.security_group_rule_destroy(context, rule['id']) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group_id=security_group['id']) + + return exc.HTTPAccepted() + + +class Security_groups(extensions.ExtensionDescriptor): + def get_name(self): + return 
"SecurityGroups" + + def get_alias(self): + return "security_groups" + + def get_description(self): + return "Security group support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/securitygroups/api/v1.1" + + def get_updated(self): + return "2011-07-21T00:00:00+00:00" + + def get_resources(self): + resources = [] + + metadata = _get_metadata() + body_serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V11), + } + serializer = wsgi.ResponseSerializer(body_serializers, None) + + body_deserializers = { + 'application/xml': SecurityGroupXMLDeserializer(), + } + deserializer = wsgi.RequestDeserializer(body_deserializers) + + res = extensions.ResourceExtension('security_groups', + controller=SecurityGroupController(), + deserializer=deserializer, + serializer=serializer) + + resources.append(res) + + body_deserializers = { + 'application/xml': SecurityGroupRulesXMLDeserializer(), + } + deserializer = wsgi.RequestDeserializer(body_deserializers) + + res = extensions.ResourceExtension('security_group_rules', + controller=SecurityGroupRulesController(), + deserializer=deserializer, + serializer=serializer) + resources.append(res) + return resources + + +class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): + """ + Deserializer to handle xml-formatted security group requests. 
+ """ + def create(self, string): + """Deserialize an xml-formatted security group create request""" + dom = minidom.parseString(string) + security_group = {} + sg_node = self.find_first_child_named(dom, + 'security_group') + if sg_node is not None: + if sg_node.hasAttribute('name'): + security_group['name'] = sg_node.getAttribute('name') + desc_node = self.find_first_child_named(sg_node, + "description") + if desc_node: + security_group['description'] = self.extract_text(desc_node) + return {'body': {'security_group': security_group}} + + +class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): + """ + Deserializer to handle xml-formatted security group requests. + """ + + def create(self, string): + """Deserialize an xml-formatted security group create request""" + dom = minidom.parseString(string) + security_group_rule = self._extract_security_group_rule(dom) + return {'body': {'security_group_rule': security_group_rule}} + + def _extract_security_group_rule(self, node): + """Marshal the security group rule attribute of a parsed request""" + sg_rule = {} + sg_rule_node = self.find_first_child_named(node, + 'security_group_rule') + if sg_rule_node is not None: + ip_protocol_node = self.find_first_child_named(sg_rule_node, + "ip_protocol") + if ip_protocol_node is not None: + sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node) + + from_port_node = self.find_first_child_named(sg_rule_node, + "from_port") + if from_port_node is not None: + sg_rule['from_port'] = self.extract_text(from_port_node) + + to_port_node = self.find_first_child_named(sg_rule_node, "to_port") + if to_port_node is not None: + sg_rule['to_port'] = self.extract_text(to_port_node) + + parent_group_id_node = self.find_first_child_named(sg_rule_node, + "parent_group_id") + if parent_group_id_node is not None: + sg_rule['parent_group_id'] = self.extract_text( + parent_group_id_node) + + group_id_node = self.find_first_child_named(sg_rule_node, + "group_id") + if group_id_node 
is not None: + sg_rule['group_id'] = self.extract_text(group_id_node) + + cidr_node = self.find_first_child_named(sg_rule_node, "cidr") + if cidr_node is not None: + sg_rule['cidr'] = self.extract_text(cidr_node) + + return sg_rule + + +def _get_metadata(): + metadata = { + "attributes": { + "security_group": ["id", "tenant_id", "name"], + "rule": ["id", "parent_group_id"], + "security_group_rule": ["id", "parent_group_id"], + } + } + return metadata diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index cc889703e..15b3cfae4 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -265,9 +265,13 @@ class ExtensionMiddleware(base_wsgi.Middleware): for resource in ext_mgr.get_resources(): LOG.debug(_('Extended resource: %s'), resource.collection) + if resource.serializer is None: + resource.serializer = serializer + mapper.resource(resource.collection, resource.collection, controller=wsgi.Resource( - resource.controller, serializer=serializer), + resource.controller, resource.deserializer, + resource.serializer), collection=resource.collection_actions, member=resource.member_actions, parent_resource=resource.parent) @@ -460,12 +464,15 @@ class ResourceExtension(object): """Add top level resources to the OpenStack API in nova.""" def __init__(self, collection, controller, parent=None, - collection_actions={}, member_actions={}): + collection_actions={}, member_actions={}, + deserializer=None, serializer=None): self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions + self.deserializer = deserializer + self.serializer = serializer class ExtensionsXMLSerializer(wsgi.XMLDictSerializer): diff --git a/nova/db/api.py b/nova/db/api.py index 47308bdba..e189dfc16 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1096,6 +1096,11 @@ def security_group_rule_destroy(context, security_group_rule_id): return 
IMPL.security_group_rule_destroy(context, security_group_rule_id) +def security_group_rule_get(context, security_group_rule_id): + """Gets a security group rule.""" + return IMPL.security_group_rule_get(context, security_group_rule_id) + + ################### diff --git a/nova/exception.py b/nova/exception.py index 68e6ac937..535410bd6 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -198,6 +198,10 @@ class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") +class InvalidCidr(Invalid): + message = _("Invalid cidr %(cidr)s.") + + class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py new file mode 100644 index 000000000..eaef413a7 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -0,0 +1,761 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import unittest +import webob +from xml.dom import minidom + +from nova import test +from nova.api.openstack.contrib import security_groups +from nova.tests.api.openstack import fakes + + +def _get_create_request_json(body_dict): + req = webob.Request.blank('/v1.1/security_groups') + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + req.body = json.dumps(body_dict) + return req + + +def _create_security_group_json(security_group): + body_dict = _create_security_group_request_dict(security_group) + request = _get_create_request_json(body_dict) + response = request.get_response(fakes.wsgi_app()) + return response + + +def _create_security_group_request_dict(security_group): + sg = {} + if security_group is not None: + name = security_group.get('name', None) + description = security_group.get('description', None) + if name: + sg['name'] = security_group['name'] + if description: + sg['description'] = security_group['description'] + return {'security_group': sg} + + +class TestSecurityGroups(test.TestCase): + def setUp(self): + super(TestSecurityGroups, self).setUp() + + def tearDown(self): + super(TestSecurityGroups, self).tearDown() + + def _create_security_group_request_dict(self, security_group): + sg = {} + if security_group is not None: + name = security_group.get('name', None) + description = security_group.get('description', None) + if name: + sg['name'] = security_group['name'] + if description: + sg['description'] = security_group['description'] + return {'security_group': sg} + + def _format_create_xml_request_body(self, body_dict): + sg = body_dict['security_group'] + body_parts = [] + body_parts.extend([ + '', + '' % (sg['name'])]) + if 'description' in sg: + body_parts.append('%s' + % sg['description']) + body_parts.append('') + return ''.join(body_parts) + + def _get_create_request_xml(self, body_dict): + req = webob.Request.blank('/v1.1/security_groups') + req.headers['Content-Type'] = 'application/xml' + 
req.content_type = 'application/xml' + req.accept = 'application/xml' + req.method = 'POST' + req.body = self._format_create_xml_request_body(body_dict) + return req + + def _create_security_group_xml(self, security_group): + body_dict = self._create_security_group_request_dict(security_group) + request = self._get_create_request_xml(body_dict) + response = request.get_response(fakes.wsgi_app()) + return response + + def _delete_security_group(self, id): + request = webob.Request.blank('/v1.1/security_groups/%s' + % id) + request.method = 'DELETE' + response = request.get_response(fakes.wsgi_app()) + return response + + def test_create_security_group_json(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + res_dict = json.loads(response.body) + self.assertEqual(res_dict['security_group']['name'], "test") + self.assertEqual(res_dict['security_group']['description'], + "group-description") + self.assertEquals(response.status_int, 200) + + def test_create_security_group_xml(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = "group-description" + response = \ + self._create_security_group_xml(security_group) + + self.assertEquals(response.status_int, 200) + dom = minidom.parseString(response.body) + sg = dom.childNodes[0] + self.assertEquals(sg.nodeName, 'security_group') + self.assertEqual(security_group['name'], sg.getAttribute('name')) + + def test_create_security_group_with_no_name_json(self): + security_group = {} + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_with_no_description_json(self): + security_group = {} + security_group['name'] = "test" + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def 
test_create_security_group_with_blank_name_json(self): + security_group = {} + security_group['name'] = "" + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_with_whitespace_name_json(self): + security_group = {} + security_group['name'] = " " + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_with_blank_description_json(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = "" + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_with_whitespace_description_json(self): + security_group = {} + security_group['name'] = "name" + security_group['description'] = " " + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_with_duplicate_name_json(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + + self.assertEquals(response.status_int, 200) + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_with_no_body_json(self): + request = _get_create_request_json(body_dict=None) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def test_create_security_group_with_no_security_group(self): + body_dict = {} + body_dict['no-securityGroup'] = None + request = _get_create_request_json(body_dict) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def 
test_create_security_group_above_255_characters_name_json(self): + security_group = {} + security_group['name'] = ("1234567890123456" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890") + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + + self.assertEquals(response.status_int, 400) + + def test_create_security_group_above_255_characters_description_json(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = ("1234567890123456" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890") + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_non_string_name_json(self): + security_group = {} + security_group['name'] = 12 + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_create_security_group_non_string_description_json(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = 12 + response = _create_security_group_json(security_group) + self.assertEquals(response.status_int, 400) + + def test_get_security_group_list(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + + req = webob.Request.blank('/v1.1/security_groups') + req.headers['Content-Type'] = 'application/json' + req.method = 
'GET' + response = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(response.body) + + expected = {'security_groups': [ + {'id': 1, + 'name':"default", + 'tenant_id': "fake", + "description":"default", + "rules": [] + }, + ] + } + expected['security_groups'].append( + { + 'id': 2, + 'name': "test", + 'tenant_id': "fake", + "description": "group-description", + "rules": [] + } + ) + self.assertEquals(response.status_int, 200) + self.assertEquals(res_dict, expected) + + def test_get_security_group_by_id(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = "group-description" + response = _create_security_group_json(security_group) + + res_dict = json.loads(response.body) + req = webob.Request.blank('/v1.1/security_groups/%s' % + res_dict['security_group']['id']) + req.headers['Content-Type'] = 'application/json' + req.method = 'GET' + response = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(response.body) + + expected = { + 'security_group': { + 'id': 2, + 'name': "test", + 'tenant_id': "fake", + 'description': "group-description", + 'rules': [] + } + } + self.assertEquals(response.status_int, 200) + self.assertEquals(res_dict, expected) + + def test_get_security_group_by_invalid_id(self): + req = webob.Request.blank('/v1.1/security_groups/invalid') + req.headers['Content-Type'] = 'application/json' + req.method = 'GET' + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 400) + + def test_get_security_group_by_non_existing_id(self): + req = webob.Request.blank('/v1.1/security_groups/111111111') + req.headers['Content-Type'] = 'application/json' + req.method = 'GET' + response = req.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 404) + + def test_delete_security_group_by_id(self): + security_group = {} + security_group['name'] = "test" + security_group['description'] = "group-description" + response = 
_create_security_group_json(security_group) + security_group = json.loads(response.body)['security_group'] + response = self._delete_security_group(security_group['id']) + self.assertEquals(response.status_int, 202) + + response = self._delete_security_group(security_group['id']) + self.assertEquals(response.status_int, 404) + + def test_delete_security_group_by_invalid_id(self): + response = self._delete_security_group('invalid') + self.assertEquals(response.status_int, 400) + + def test_delete_security_group_by_non_existing_id(self): + response = self._delete_security_group(11111111) + self.assertEquals(response.status_int, 404) + + +class TestSecurityGroupRules(test.TestCase): + def setUp(self): + super(TestSecurityGroupRules, self).setUp() + security_group = {} + security_group['name'] = "authorize-revoke" + security_group['description'] = ("Security group created for " + " authorize-revoke testing") + response = _create_security_group_json(security_group) + security_group = json.loads(response.body) + self.parent_security_group = security_group['security_group'] + + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "parent_group_id": self.parent_security_group['id'], + "cidr": "10.0.0.0/24" + } + } + res = self._create_security_group_rule_json(rules) + self.assertEquals(res.status_int, 200) + self.security_group_rule = json.loads(res.body)['security_group_rule'] + + def tearDown(self): + super(TestSecurityGroupRules, self).tearDown() + + def _create_security_group_rule_json(self, rules): + request = webob.Request.blank('/v1.1/security_group_rules') + request.headers['Content-Type'] = 'application/json' + request.method = 'POST' + request.body = json.dumps(rules) + response = request.get_response(fakes.wsgi_app()) + return response + + def _delete_security_group_rule(self, id): + request = webob.Request.blank('/v1.1/security_group_rules/%s' + % id) + request.method = 'DELETE' + response = 
request.get_response(fakes.wsgi_app()) + return response + + def test_create_by_cidr_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "parent_group_id": 2, + "cidr": "10.2.3.124/24" + } + } + + response = self._create_security_group_rule_json(rules) + security_group_rule = json.loads(response.body)['security_group_rule'] + self.assertEquals(response.status_int, 200) + self.assertNotEquals(security_group_rule['id'], 0) + self.assertEquals(security_group_rule['parent_group_id'], 2) + self.assertEquals(security_group_rule['ip_range']['cidr'], + "10.2.3.124/24") + + def test_create_by_group_id_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "group_id": "1", + "parent_group_id": "%s" + % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 200) + security_group_rule = json.loads(response.body)['security_group_rule'] + self.assertNotEquals(security_group_rule['id'], 0) + self.assertEquals(security_group_rule['parent_group_id'], 2) + + def test_create_add_existing_rules_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "cidr": "10.0.0.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_no_body_json(self): + request = webob.Request.blank('/v1.1/security_group_rules') + request.headers['Content-Type'] = 'application/json' + request.method = 'POST' + request.body = json.dumps(None) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def test_create_with_no_security_group_rule_in_body_json(self): + request = webob.Request.blank('/v1.1/security_group_rules') + 
request.headers['Content-Type'] = 'application/json' + request.method = 'POST' + body_dict = {'test': "test"} + request.body = json.dumps(body_dict) + response = request.get_response(fakes.wsgi_app()) + self.assertEquals(response.status_int, 422) + + def test_create_with_invalid_parent_group_id_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "parent_group_id": "invalid" + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_non_existing_parent_group_id_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "group_id": "invalid", + "parent_group_id": "1111111111111" + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 404) + + def test_create_with_invalid_protocol_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "invalid-protocol", + "from_port": "22", + "to_port": "22", + "cidr": "10.2.2.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_no_protocol_json(self): + rules = { + "security_group_rule": { + "from_port": "22", + "to_port": "22", + "cidr": "10.2.2.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_invalid_from_port_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "666666", + "to_port": "22", + "cidr": "10.2.2.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def 
test_create_with_invalid_to_port_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "666666", + "cidr": "10.2.2.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_non_numerical_from_port_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "invalid", + "to_port": "22", + "cidr": "10.2.2.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_non_numerical_to_port_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "invalid", + "cidr": "10.2.2.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_no_to_port_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "cidr": "10.2.2.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_invalid_cidr_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "cidr": "10.2.22222.0/24", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_no_cidr_group_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "parent_group_id": "%s" % self.parent_security_group['id'], + 
} + } + + response = self._create_security_group_rule_json(rules) + security_group_rule = json.loads(response.body)['security_group_rule'] + self.assertEquals(response.status_int, 200) + self.assertNotEquals(security_group_rule['id'], 0) + self.assertEquals(security_group_rule['parent_group_id'], + self.parent_security_group['id']) + self.assertEquals(security_group_rule['ip_range']['cidr'], + "0.0.0.0/0") + + def test_create_with_invalid_group_id_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "group_id": "invalid", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_empty_group_id_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "group_id": "invalid", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_with_invalid_group_id_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "group_id": "222222", + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_create_rule_with_same_group_parent_id_json(self): + rules = { + "security_group_rule": { + "ip_protocol": "tcp", + "from_port": "22", + "to_port": "22", + "group_id": "%s" % self.parent_security_group['id'], + "parent_group_id": "%s" % self.parent_security_group['id'], + } + } + + response = self._create_security_group_rule_json(rules) + self.assertEquals(response.status_int, 400) + + def test_delete(self): + response = self._delete_security_group_rule( + self.security_group_rule['id']) + 
self.assertEquals(response.status_int, 202) + + response = self._delete_security_group_rule( + self.security_group_rule['id']) + self.assertEquals(response.status_int, 404) + + def test_delete_invalid_rule_id(self): + response = self._delete_security_group_rule('invalid') + self.assertEquals(response.status_int, 400) + + def test_delete_non_existing_rule_id(self): + response = self._delete_security_group_rule(22222222222222) + self.assertEquals(response.status_int, 404) + + +class TestSecurityGroupRulesXMLDeserializer(unittest.TestCase): + + def setUp(self): + self.deserializer = security_groups.SecurityGroupRulesXMLDeserializer() + + def test_create_request(self): + serial_request = """ + + 12 + 22 + 22 + + tcp + 10.0.0.0/24 +""" + request = self.deserializer.deserialize(serial_request, 'create') + expected = { + "security_group_rule": { + "parent_group_id": "12", + "from_port": "22", + "to_port": "22", + "ip_protocol": "tcp", + "group_id": "", + "cidr": "10.0.0.0/24", + }, + } + self.assertEquals(request['body'], expected) + + def test_create_no_protocol_request(self): + serial_request = """ + + 12 + 22 + 22 + + 10.0.0.0/24 +""" + request = self.deserializer.deserialize(serial_request, 'create') + expected = { + "security_group_rule": { + "parent_group_id": "12", + "from_port": "22", + "to_port": "22", + "group_id": "", + "cidr": "10.0.0.0/24", + }, + } + self.assertEquals(request['body'], expected) + + +class TestSecurityGroupXMLDeserializer(unittest.TestCase): + + def setUp(self): + self.deserializer = security_groups.SecurityGroupXMLDeserializer() + + def test_create_request(self): + serial_request = """ + + test +""" + request = self.deserializer.deserialize(serial_request, 'create') + expected = { + "security_group": { + "name": "test", + "description": "test", + }, + } + self.assertEquals(request['body'], expected) + + def test_create_no_description_request(self): + serial_request = """ + +""" + request = self.deserializer.deserialize(serial_request, 
'create') + expected = { + "security_group": { + "name": "test", + }, + } + self.assertEquals(request['body'], expected) + + def test_create_no_name_request(self): + serial_request = """ + +test +""" + request = self.deserializer.deserialize(serial_request, 'create') + expected = { + "security_group": { + "description": "test", + }, + } + self.assertEquals(request['body'], expected) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 47c37225c..98a3a72d4 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -98,7 +98,7 @@ class ExtensionControllerTest(test.TestCase): names = [x['name'] for x in data['extensions']] names.sort() self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", - "Fox In Socks", "Hosts", "Multinic", "Volumes"]) + "Fox In Socks", "Hosts", "Multinic", "SecurityGroups", "Volumes"]) # Make sure that at least Fox in Sox is correct. (fox_ext,) = [ @@ -109,7 +109,7 @@ class ExtensionControllerTest(test.TestCase): 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension', 'alias': 'FOXNSOX', - 'links': [], + 'links': [] }, ) @@ -145,7 +145,7 @@ class ExtensionControllerTest(test.TestCase): # Make sure we have all the extensions. exts = root.findall('{0}extension'.format(NS)) - self.assertEqual(len(exts), 6) + self.assertEqual(len(exts), 7) # Make sure that at least Fox in Sox is correct. 
(fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] -- cgit From b94eb7bf4fd71a23cacc20def2b5a47dad053b56 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Wed, 3 Aug 2011 16:42:23 -0700 Subject: Remove whitespaces from name and description before creating security group --- nova/api/openstack/contrib/security_groups.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py index 39f1959e0..7da046b8f 100644 --- a/nova/api/openstack/contrib/security_groups.py +++ b/nova/api/openstack/contrib/security_groups.py @@ -140,6 +140,8 @@ class SecurityGroupController(object): self._validate_security_group_name(group_name) self._validate_security_group_description(group_description) + group_name = group_name.strip() + group_description = group_description.strip() LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) -- cgit From 23f6ec84a93638e880f60bec00db8587b9e3e8d8 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Wed, 3 Aug 2011 20:26:37 -0400 Subject: fix pylint W0102 errors. 
--- nova/api/openstack/extensions.py | 6 +++++- nova/compute/api.py | 19 +++++++++++++++---- nova/objectstore/s3server.py | 5 ++++- nova/scheduler/manager.py | 4 +++- nova/tests/api/openstack/test_limits.py | 5 ++++- nova/tests/test_auth.py | 7 ++++++- nova/tests/test_compute.py | 10 ++++++++-- nova/tests/test_libvirt.py | 5 ++++- nova/virt/vmwareapi/io_util.py | 5 ++++- nova/virt/vmwareapi/vim_util.py | 10 ++++++++-- nova/virt/vmwareapi/vmware_images.py | 6 +++++- 11 files changed, 66 insertions(+), 16 deletions(-) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index cc889703e..b070dd61a 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -460,7 +460,11 @@ class ResourceExtension(object): """Add top level resources to the OpenStack API in nova.""" def __init__(self, collection, controller, parent=None, - collection_actions={}, member_actions={}): + collection_actions=None, member_actions=None): + if not collection_actions: + collection_actions = {} + if not member_actions: + member_actions = {} self.collection = collection self.controller = controller self.parent = parent diff --git a/nova/compute/api.py b/nova/compute/api.py index aae16d1da..fe62b50ce 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -121,8 +121,10 @@ class API(base.Base): if len(content) > content_limit: raise quota.QuotaError(code="OnsetFileContentLimitExceeded") - def _check_metadata_properties_quota(self, context, metadata={}): + def _check_metadata_properties_quota(self, context, metadata=None): """Enforce quota limits on metadata properties.""" + if not metadata: + metadata = {} num_metadata = len(metadata) quota_metadata = quota.allowed_metadata_items(context, num_metadata) if quota_metadata < num_metadata: @@ -148,7 +150,7 @@ class API(base.Base): min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, 
user_data=None, metadata={}, + availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, zone_blob=None, reservation_id=None): """Verify all the input parameters regardless of the provisioning @@ -160,6 +162,8 @@ class API(base.Base): min_count = 1 if not max_count: max_count = min_count + if not metadata: + metadata = {} num_instances = quota.allowed_instances(context, max_count, instance_type) @@ -395,12 +399,16 @@ class API(base.Base): min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, metadata={}, + availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, zone_blob=None, reservation_id=None, block_device_mapping=None): """Provision the instances by passing the whole request to the Scheduler for execution. Returns a Reservation ID related to the creation of all of these instances.""" + + if not metadata: + metadata = {} + num_instances, base_options, image = self._check_create_parameters( context, instance_type, image_href, kernel_id, ramdisk_id, @@ -424,7 +432,7 @@ class API(base.Base): min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, metadata={}, + availability_zone=None, user_data=None, metadata=None, injected_files=None, admin_password=None, zone_blob=None, reservation_id=None, block_device_mapping=None): """ @@ -439,6 +447,9 @@ class API(base.Base): Returns a list of instance dicts. 
""" + if not metadata: + metadata = {} + num_instances, base_options, image = self._check_create_parameters( context, instance_type, image_href, kernel_id, ramdisk_id, diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py index 76025a1e3..1ab47b034 100644 --- a/nova/objectstore/s3server.py +++ b/nova/objectstore/s3server.py @@ -155,7 +155,10 @@ class BaseRequestHandler(object): self.finish('\n' + ''.join(parts)) - def _render_parts(self, value, parts=[]): + def _render_parts(self, value, parts=None): + if not parts: + parts = [] + if isinstance(value, basestring): parts.append(utils.xhtml_escape(value)) elif isinstance(value, int) or isinstance(value, long): diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 749d66cad..c8b16b622 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -69,8 +69,10 @@ class SchedulerManager(manager.Manager): return self.zone_manager.get_zone_capabilities(context) def update_service_capabilities(self, context=None, service_name=None, - host=None, capabilities={}): + host=None, capabilities=None): """Process a capability update from a service node.""" + if not capability: + capability = {} self.zone_manager.update_service_capabilities(service_name, host, capabilities) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 6c3d531e3..1dc3c3a17 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -819,12 +819,15 @@ class FakeHttplibConnection(object): self.app = app self.host = host - def request(self, method, path, body="", headers={}): + def request(self, method, path, body="", headers=None): """ Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `httplib.HTTPResponse`. 
""" + if not headers: + headers = {} + req = webob.Request.blank(path) req.method = method req.headers = headers diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 7c0f783bb..0fb2fdcc6 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -62,7 +62,12 @@ class project_generator(object): class user_and_project_generator(object): - def __init__(self, manager, user_state={}, project_state={}): + def __init__(self, manager, user_state=None, project_state=None): + if not user_state: + user_state = {} + if not project_state: + project_state = {} + self.manager = manager if 'name' not in user_state: user_state['name'] = 'test1' diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 879e4b9cb..9be349d14 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -73,8 +73,11 @@ class ComputeTestCase(test.TestCase): self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show) - def _create_instance(self, params={}): + def _create_instance(self, params=None): """Create a test instance""" + if not params: + params = {} + inst = {} inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' @@ -87,8 +90,11 @@ class ComputeTestCase(test.TestCase): inst.update(params) return db.instance_create(self.context, inst)['id'] - def _create_instance_type(self, params={}): + def _create_instance_type(self, params=None): """Create a test instance""" + if not params: + params = {} + context = self.context.elevated() inst = {} inst['name'] = 'm1.small' diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index c5c3151dc..aa77258cb 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -1162,8 +1162,11 @@ class NWFilterTestCase(test.TestCase): 'project_id': 'fake', 'instance_type_id': 1}) - def _create_instance_type(self, params={}): + def _create_instance_type(self, params=None): """Create a test instance""" + if not params: + params = {} + context = self.context.elevated() inst 
= {} inst['name'] = 'm1.small' diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py index 2ec773b7b..409242800 100644 --- a/nova/virt/vmwareapi/io_util.py +++ b/nova/virt/vmwareapi/io_util.py @@ -68,7 +68,10 @@ class GlanceWriteThread(object): """Ensures that image data is written to in the glance client and that it is in correct ('active')state.""" - def __init__(self, input, glance_client, image_id, image_meta={}): + def __init__(self, input, glance_client, image_id, image_meta=None): + if not image_meta: + image_meta = {} + self.input = input self.glance_client = glance_client self.image_id = image_id diff --git a/nova/virt/vmwareapi/vim_util.py b/nova/virt/vmwareapi/vim_util.py index 11214231c..e03daddac 100644 --- a/nova/virt/vmwareapi/vim_util.py +++ b/nova/virt/vmwareapi/vim_util.py @@ -95,9 +95,12 @@ def build_recursive_traversal_spec(client_factory): def build_property_spec(client_factory, type="VirtualMachine", - properties_to_collect=["name"], + properties_to_collect=None, all_properties=False): """Builds the Property Spec.""" + if not properties_to_collect: + properties_to_collect = ["name"] + property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = all_properties property_spec.pathSet = properties_to_collect @@ -155,8 +158,11 @@ def get_dynamic_property(vim, mobj, type, property_name): return property_value -def get_objects(vim, type, properties_to_collect=["name"], all=False): +def get_objects(vim, type, properties_to_collect=None, all=False): """Gets the list of objects of the type specified.""" + if not properties_to_collect: + properties_to_collect = ["name"] + client_factory = vim.client.factory object_spec = build_object_spec(client_factory, vim.get_service_content().rootFolder, diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 70adba74f..f5f75dae2 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -33,11 +33,15 
@@ QUEUE_BUFFER_SIZE = 10 def start_transfer(read_file_handle, data_size, write_file_handle=None, - glance_client=None, image_id=None, image_meta={}): + glance_client=None, image_id=None, image_meta=None): """Start the data transfer from the reader to the writer. Reader writes to the pipe and the writer reads from the pipe. This means that the total transfer time boils down to the slower of the read/write and not the addition of the two times.""" + + if not image_meta: + image_meta = {} + # The pipe that acts as an intermediate store of data for reader to write # to and writer to grab from. thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size) -- cgit From 346454c7fb1156d8dff075042f1c45dbb22cded1 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 4 Aug 2011 16:02:28 -0400 Subject: Nuke hostname. We don't use it. --- bin/nova-dhcpbridge | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 325642d52..621222d8f 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -53,7 +53,7 @@ flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface') LOG = logging.getLogger('nova.dhcpbridge') -def add_lease(mac, ip_address, _hostname, _interface): +def add_lease(mac, ip_address, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: LOG.debug(_("leasing ip")) @@ -67,13 +67,13 @@ def add_lease(mac, ip_address, _hostname, _interface): "args": {"address": ip_address}}) -def old_lease(mac, ip_address, hostname, interface): +def old_lease(mac, ip_address, interface): """Update just as add lease.""" - LOG.debug(_("Adopted old lease or got a change of mac/hostname")) - add_lease(mac, ip_address, hostname, interface) + LOG.debug(_("Adopted old lease or got a change of mac")) + add_lease(mac, ip_address, interface) -def del_lease(mac, ip_address, _hostname, _interface): +def del_lease(mac, ip_address, _interface): """Called when a 
lease expires.""" if FLAGS.fake_rabbit: LOG.debug(_("releasing ip")) @@ -115,11 +115,10 @@ def main(): if action in ['add', 'del', 'old']: mac = argv[2] ip = argv[3] - hostname = argv[4] - msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and" - " hostname %(hostname)s on interface %(interface)s") % locals() + msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s" + " on interface %(interface)s") % locals() LOG.debug(msg) - globals()[action + '_lease'](mac, ip, hostname, interface) + globals()[action + '_lease'](mac, ip, interface) else: print init_leases(interface) -- cgit From 2a89883297f6b5398abb7486e9e26c12ab0fc0ec Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 5 Aug 2011 14:02:55 +0200 Subject: Remove spurious direct use of subprocess --- nova/console/xvp.py | 1 - nova/utils.py | 4 ++++ nova/virt/libvirt/connection.py | 3 +-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 3cd287183..2d6842044 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -20,7 +20,6 @@ import fcntl import os import signal -import subprocess from Cheetah import Template diff --git a/nova/utils.py b/nova/utils.py index 1e2dbebb1..3a2049464 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -137,6 +137,8 @@ def execute(*cmd, **kwargs): :delay_on_retry True | False. Defaults to True. If set to True, wait a short amount of time before retrying. :attempts How many times to retry cmd. + :shell True | False. Defaults to False. If set to True, + Popen command is called shell=True. 
:raises exception.Error on receiving unknown arguments :raises exception.ProcessExecutionError @@ -147,6 +149,7 @@ def execute(*cmd, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) + shell = kwargs.pop('shell', False) if len(kwargs): raise exception.Error(_('Got unknown keyword args ' 'to utils.execute: %r') % kwargs) @@ -164,6 +167,7 @@ def execute(*cmd, **kwargs): stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, + shell=shell, env=env) result = None if process_input is not None: diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 0b93a8399..ec202a4ef 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -43,7 +43,6 @@ import os import random import re import shutil -import subprocess import sys import tempfile import time @@ -684,7 +683,7 @@ class LibvirtConnection(driver.ComputeDriver): cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \ % (utils.novadir(), ajaxterm_cmd, token, port) - subprocess.Popen(cmd, shell=True) + utils.execute(cmd, shell=True) return {'token': token, 'host': host, 'port': port} def get_host_ip_addr(self): -- cgit From ecdb1c87fb3904080d48f3fd3b3619f6c4a40df9 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 5 Aug 2011 14:33:12 +0200 Subject: Add run_as_root parameter to utils.execute, uses new sudo_helper FLAG to prefix command --- nova/flags.py | 3 +++ nova/utils.py | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/nova/flags.py b/nova/flags.py index 12c6d1356..e30c0d6be 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -392,3 +392,6 @@ DEFINE_bool('start_guests_on_host_boot', False, 'Whether to restart guests when the host reboots') DEFINE_bool('resume_guests_state_on_host_boot', False, 'Whether to start guests, that was running before the host reboot') + +DEFINE_string('sudo_helper', 'sudo', + 'Command prefix to use for running commands as root') 
diff --git a/nova/utils.py b/nova/utils.py index 3a2049464..0820a2bde 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -28,6 +28,7 @@ import netaddr import os import random import re +import shlex import socket import struct import sys @@ -139,6 +140,9 @@ def execute(*cmd, **kwargs): :attempts How many times to retry cmd. :shell True | False. Defaults to False. If set to True, Popen command is called shell=True. + :run_as_root True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the sudo_helper FLAG. :raises exception.Error on receiving unknown arguments :raises exception.ProcessExecutionError @@ -150,9 +154,13 @@ def execute(*cmd, **kwargs): delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) shell = kwargs.pop('shell', False) + run_as_root = kwargs.pop('run_as_root', False) if len(kwargs): raise exception.Error(_('Got unknown keyword args ' 'to utils.execute: %r') % kwargs) + + if run_as_root: + cmd = shlex.split(FLAGS.sudo_helper) + cmd cmd = map(str, cmd) while attempts > 0: -- cgit From 38756955417e5c2fad7c8848252c5a2334912e02 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 5 Aug 2011 16:23:48 +0200 Subject: Removed most direct sudo calls, make them use run_as_root=True instead --- nova/network/linux_net.py | 78 ++++++++++++++++-------------- nova/tests/test_libvirt.py | 10 ++-- nova/tests/test_volume.py | 10 ++-- nova/tests/test_xenapi.py | 10 ++-- nova/virt/disk.py | 47 +++++++++--------- nova/virt/libvirt/connection.py | 7 +-- nova/virt/libvirt/vif.py | 16 ++++--- nova/virt/xenapi/vm_utils.py | 15 +++--- nova/virt/xenapi/volume_utils.py | 4 +- nova/volume/driver.py | 101 +++++++++++++++++++++------------------ 10 files changed, 161 insertions(+), 137 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 8ace07884..3c62c75cd 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -296,14 +296,14 @@ class 
IptablesManager(object): for cmd, tables in s: for table in tables: - current_table, _ = self.execute('sudo', - '%s-save' % (cmd,), + current_table, _ = self.execute('%s-save' % (cmd,), '-t', '%s' % (table,), + run_as_root=True, attempts=5) current_lines = current_table.split('\n') new_filter = self._modify_rules(current_lines, tables[table]) - self.execute('sudo', '%s-restore' % (cmd,), + self.execute('%s-restore' % (cmd,), run_as_root=True, process_input='\n'.join(new_filter), attempts=5) @@ -396,21 +396,22 @@ def init_host(): def bind_floating_ip(floating_ip, check_exit_code=True): """Bind ip to public interface.""" - _execute('sudo', 'ip', 'addr', 'add', floating_ip, + _execute('ip', 'addr', 'add', floating_ip, 'dev', FLAGS.public_interface, - check_exit_code=check_exit_code) + run_as_root=True, check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): """Unbind a public ip from public interface.""" - _execute('sudo', 'ip', 'addr', 'del', floating_ip, - 'dev', FLAGS.public_interface) + _execute('ip', 'addr', 'del', floating_ip, + 'dev', FLAGS.public_interface, run_as_root=True) def ensure_metadata_ip(): """Sets up local metadata ip.""" - _execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32', - 'scope', 'link', 'dev', 'lo', check_exit_code=False) + _execute('ip', 'addr', 'add', '169.254.169.254/32', + 'scope', 'link', 'dev', 'lo', + run_as_root=True, check_exit_code=False) def ensure_vlan_forward(public_ip, port, private_ip): @@ -464,9 +465,11 @@ def ensure_vlan(vlan_num, bridge_interface): interface = 'vlan%s' % vlan_num if not _device_exists(interface): LOG.debug(_('Starting VLAN inteface %s'), interface) - _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') - _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num) - _execute('sudo', 'ip', 'link', 'set', interface, 'up') + _execute('vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD', + run_as_root=True) + _execute('vconfig', 'add', bridge_interface, vlan_num, + 
run_as_root=True) + _execute('ip', 'link', 'set', interface, 'up', run_as_root=True) return interface @@ -487,58 +490,63 @@ def ensure_bridge(bridge, interface, net_attrs=None): """ if not _device_exists(bridge): LOG.debug(_('Starting Bridge interface for %s'), interface) - _execute('sudo', 'brctl', 'addbr', bridge) - _execute('sudo', 'brctl', 'setfd', bridge, 0) + _execute('brctl', 'addbr', bridge, run_as_root=True) + _execute('brctl', 'setfd', bridge, 0, run_as_root=True) # _execute('sudo brctl setageing %s 10' % bridge) - _execute('sudo', 'brctl', 'stp', bridge, 'off') - _execute('sudo', 'ip', 'link', 'set', bridge, 'up') + _execute('brctl', 'stp', bridge, 'off', run_as_root=True) + _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True) if net_attrs: # NOTE(vish): The ip for dnsmasq has to be the first address on the # bridge for it to respond to reqests properly suffix = net_attrs['cidr'].rpartition('/')[2] - out, err = _execute('sudo', 'ip', 'addr', 'add', + out, err = _execute('ip', 'addr', 'add', '%s/%s' % (net_attrs['dhcp_server'], suffix), 'brd', net_attrs['broadcast'], 'dev', bridge, + run_as_root=True, check_exit_code=False) if err and err != 'RTNETLINK answers: File exists\n': raise exception.Error('Failed to add ip: %s' % err) if(FLAGS.use_ipv6): - _execute('sudo', 'ip', '-f', 'inet6', 'addr', + _execute('ip', '-f', 'inet6', 'addr', 'change', net_attrs['cidr_v6'], - 'dev', bridge) + 'dev', bridge, run_as_root=True) # NOTE(vish): If the public interface is the same as the # bridge, then the bridge has to be in promiscuous # to forward packets properly. 
if(FLAGS.public_interface == bridge): - _execute('sudo', 'ip', 'link', 'set', - 'dev', bridge, 'promisc', 'on') + _execute('ip', 'link', 'set', + 'dev', bridge, 'promisc', 'on', run_as_root=True) if interface: # NOTE(vish): This will break if there is already an ip on the # interface, so we move any ips to the bridge gateway = None - out, err = _execute('sudo', 'route', '-n') + out, err = _execute('route', '-n', run_as_root=True) for line in out.split('\n'): fields = line.split() if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: gateway = fields[1] - _execute('sudo', 'route', 'del', 'default', 'gw', gateway, - 'dev', interface, check_exit_code=False) - out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, - 'scope', 'global') + _execute('route', 'del', 'default', 'gw', gateway, + 'dev', interface, + check_exit_code=False, run_as_root=True) + out, err = _execute('ip', 'addr', 'show', 'dev', interface, + 'scope', 'global', run_as_root=True) for line in out.split('\n'): fields = line.split() if fields and fields[0] == 'inet': params = fields[1:-1] - _execute(*_ip_bridge_cmd('del', params, fields[-1])) - _execute(*_ip_bridge_cmd('add', params, bridge)) + _execute(*_ip_bridge_cmd('del', params, fields[-1]), + run_as_root=True) + _execute(*_ip_bridge_cmd('add', params, bridge), + run_as_root=True) if gateway: - _execute('sudo', 'route', 'add', 'default', 'gw', gateway) - out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, - check_exit_code=False) + _execute('route', 'add', 'default', 'gw', gateway, + run_as_root=True) + out, err = _execute('brctl', 'addif', bridge, interface, + check_exit_code=False, run_as_root=True) if (err and err != "device %s is already a member of a bridge; can't " "enslave it to bridge %s.\n" % (interface, bridge)): @@ -602,7 +610,7 @@ def update_dhcp(context, network_ref): check_exit_code=False) if conffile in out: try: - _execute('sudo', 'kill', '-HUP', pid) + _execute('kill', '-HUP', pid, 
run_as_root=True) return except Exception as exc: # pylint: disable=W0703 LOG.debug(_('Hupping dnsmasq threw %s'), exc) @@ -646,7 +654,7 @@ interface %s % pid, check_exit_code=False) if conffile in out: try: - _execute('sudo', 'kill', pid) + _execute('kill', pid, run_as_root=True) except Exception as exc: # pylint: disable=W0703 LOG.debug(_('killing radvd threw %s'), exc) else: @@ -732,7 +740,7 @@ def _stop_dnsmasq(network): if pid: try: - _execute('sudo', 'kill', '-TERM', pid) + _execute('kill', '-TERM', pid, run_as_root=True) except Exception as exc: # pylint: disable=W0703 LOG.debug(_('Killing dnsmasq threw %s'), exc) @@ -788,7 +796,7 @@ def _ra_pid_for(bridge): def _ip_bridge_cmd(action, params, device): """Build commands to add/del ips to bridges/devices.""" - cmd = ['sudo', 'ip', 'addr', action] + cmd = ['ip', 'addr', action] cmd.extend(params) cmd.extend(['dev', device]) return cmd diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index f8b866985..1baf373bf 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -884,18 +884,18 @@ class IptablesFirewallTestCase(test.TestCase): # self.fw.add_instance(instance_ref) def fake_iptables_execute(*cmd, **kwargs): process_input = kwargs.get('process_input', None) - if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'): + if cmd == ('ip6tables-save', '-t', 'filter'): return '\n'.join(self.in6_filter_rules), None - if cmd == ('sudo', 'iptables-save', '-t', 'filter'): + if cmd == ('iptables-save', '-t', 'filter'): return '\n'.join(self.in_filter_rules), None - if cmd == ('sudo', 'iptables-save', '-t', 'nat'): + if cmd == ('iptables-save', '-t', 'nat'): return '\n'.join(self.in_nat_rules), None - if cmd == ('sudo', 'iptables-restore'): + if cmd == ('iptables-restore',): lines = process_input.split('\n') if '*filter' in lines: self.out_rules = lines return '', '' - if cmd == ('sudo', 'ip6tables-restore'): + if cmd == ('ip6tables-restore',): lines = process_input.split('\n') if 
'*filter' in lines: self.out6_rules = lines diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c0f89601f..7888b6b0b 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -414,8 +414,9 @@ class ISCSITestCase(DriverTestCase): self.mox.StubOutWithMock(self.volume.driver, '_execute') for i in volume_id_list: tid = db.volume_get_iscsi_target_num(self.context, i) - self.volume.driver._execute("sudo", "ietadm", "--op", "show", - "--tid=%(tid)d" % locals()) + self.volume.driver._execute("ietadm", "--op", "show", + "--tid=%(tid)d" % locals(), + run_as_root=True) self.stream.truncate(0) self.mox.ReplayAll() @@ -433,8 +434,9 @@ class ISCSITestCase(DriverTestCase): # the first vblade process isn't running tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0]) self.mox.StubOutWithMock(self.volume.driver, '_execute') - self.volume.driver._execute("sudo", "ietadm", "--op", "show", - "--tid=%(tid)d" % locals()).AndRaise( + self.volume.driver._execute("ietadm", "--op", "show", + "--tid=%(tid)d" % locals(), + run_as_root=True).AndRaise( exception.ProcessExecutionError()) self.mox.ReplayAll() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 39ab23d9b..84480d26f 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -548,8 +548,8 @@ class XenAPIVMTestCase(test.TestCase): return '', '' fake_utils.fake_execute_set_repliers([ - # Capture the sudo tee .../etc/network/interfaces command - (r'(sudo\s+)?tee.*interfaces', _tee_handler), + # Capture the tee .../etc/network/interfaces command + (r'tee.*interfaces', _tee_handler), ]) self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, @@ -592,9 +592,9 @@ class XenAPIVMTestCase(test.TestCase): return '', '' fake_utils.fake_execute_set_repliers([ - (r'(sudo\s+)?mount', _mount_handler), - (r'(sudo\s+)?umount', _umount_handler), - (r'(sudo\s+)?tee.*interfaces', _tee_handler)]) + (r'mount', _mount_handler), + 
(r'umount', _umount_handler), + (r'tee.*interfaces', _tee_handler)]) self._test_spawn(1, 2, 3, check_injection=True) # tee must not run in this case, where an injection-capable diff --git a/nova/virt/disk.py b/nova/virt/disk.py index f8aea1f34..19f3ec185 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -73,7 +73,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): try: if not partition is None: # create partition - out, err = utils.execute('sudo', 'kpartx', '-a', device) + out, err = utils.execute('kpartx', '-a', device, run_as_root=True) if err: raise exception.Error(_('Failed to load partition: %s') % err) mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], @@ -90,14 +90,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): mapped_device) # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo', 'tune2fs', - '-c', 0, '-i', 0, mapped_device) + out, err = utils.execute('tune2fs', '-c', 0, '-i', 0, + mapped_device, run_as_root=True) tmpdir = tempfile.mkdtemp() try: # mount loopback to dir - out, err = utils.execute( - 'sudo', 'mount', mapped_device, tmpdir) + out, err = utils.execute('mount', mapped_device, tmpdir, + run_as_root=True) if err: raise exception.Error(_('Failed to mount filesystem: %s') % err) @@ -106,14 +106,14 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): inject_data_into_fs(tmpdir, key, net, utils.execute) finally: # unmount device - utils.execute('sudo', 'umount', mapped_device) + utils.execute('umount', mapped_device, run_as_root=True) finally: # remove temporary directory utils.execute('rmdir', tmpdir) finally: if not partition is None: # remove partitions - utils.execute('sudo', 'kpartx', '-d', device) + utils.execute('kpartx', '-d', device, run_as_root=True) finally: _unlink_device(device, nbd) @@ -128,7 +128,7 @@ def setup_container(image, container_dir=None, nbd=False): """ try: device = _link_device(image, nbd) 
- utils.execute('sudo', 'mount', device, container_dir) + utils.execute('mount', device, container_dir, run_as_root=True) except Exception, exn: LOG.exception(_('Failed to mount filesystem: %s'), exn) _unlink_device(device, nbd) @@ -144,9 +144,9 @@ def destroy_container(target, instance, nbd=False): """ try: container_dir = '%s/rootfs' % target - utils.execute('sudo', 'umount', container_dir) + utils.execute('umount', container_dir, run_as_root=True) finally: - out, err = utils.execute('sudo', 'losetup', '-a') + out, err = utils.execute('losetup', '-a', run_as_root=True) for loop in out.splitlines(): if instance['name'] in loop: device = loop.split(loop, ':') @@ -157,7 +157,7 @@ def _link_device(image, nbd): """Link image to device using loopback or nbd""" if nbd: device = _allocate_device() - utils.execute('sudo', 'qemu-nbd', '-c', device, image) + utils.execute('qemu-nbd', '-c', device, image, run_as_root=True) # NOTE(vish): this forks into another process, so give it a chance # to set up before continuuing for i in xrange(FLAGS.timeout_nbd): @@ -166,7 +166,8 @@ def _link_device(image, nbd): time.sleep(1) raise exception.Error(_('nbd device %s did not show up') % device) else: - out, err = utils.execute('sudo', 'losetup', '--find', '--show', image) + out, err = utils.execute('losetup', '--find', '--show', image, + run_as_root=True) if err: raise exception.Error(_('Could not attach image to loopback: %s') % err) @@ -176,10 +177,10 @@ def _link_device(image, nbd): def _unlink_device(device, nbd): """Unlink image from device using loopback or nbd""" if nbd: - utils.execute('sudo', 'qemu-nbd', '-d', device) + utils.execute('qemu-nbd', '-d', device, run_as_root=True) _free_device(device) else: - utils.execute('sudo', 'losetup', '--detach', device) + utils.execute('losetup', '--detach', device, run_as_root=True) _DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)] @@ -220,12 +221,12 @@ def _inject_key_into_fs(key, fs, execute=None): fs is the path to 
the base of the filesystem into which to inject the key. """ sshdir = os.path.join(fs, 'root', '.ssh') - utils.execute('sudo', 'mkdir', '-p', sshdir) # existing dir doesn't matter - utils.execute('sudo', 'chown', 'root', sshdir) - utils.execute('sudo', 'chmod', '700', sshdir) + utils.execute('mkdir', '-p', sshdir, run_as_root=True) + utils.execute('chown', 'root', sshdir, run_as_root=True) + utils.execute('chmod', '700', sshdir, run_as_root=True) keyfile = os.path.join(sshdir, 'authorized_keys') - utils.execute('sudo', 'tee', '-a', keyfile, - process_input='\n' + key.strip() + '\n') + utils.execute('tee', '-a', keyfile, + process_input='\n' + key.strip() + '\n', run_as_root=True) def _inject_net_into_fs(net, fs, execute=None): @@ -234,8 +235,8 @@ def _inject_net_into_fs(net, fs, execute=None): net is the contents of /etc/network/interfaces. """ netdir = os.path.join(os.path.join(fs, 'etc'), 'network') - utils.execute('sudo', 'mkdir', '-p', netdir) # existing dir doesn't matter - utils.execute('sudo', 'chown', 'root:root', netdir) - utils.execute('sudo', 'chmod', 755, netdir) + utils.execute('mkdir', '-p', netdir, run_as_root=True) + utils.execute('chown', 'root:root', netdir, run_as_root=True) + utils.execute('chmod', 755, netdir, run_as_root=True) netfile = os.path.join(netdir, 'interfaces') - utils.execute('sudo', 'tee', netfile, process_input=net) + utils.execute('tee', netfile, process_input=net, run_as_root=True) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index ec202a4ef..5053daf8f 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -607,9 +607,10 @@ class LibvirtConnection(driver.ComputeDriver): if virsh_output.startswith('/dev/'): LOG.info(_("cool, it's a device")) - out, err = utils.execute('sudo', 'dd', + out, err = utils.execute('dd', "if=%s" % virsh_output, 'iflag=nonblock', + run_as_root=True, check_exit_code=False) return out else: @@ -632,7 +633,7 @@ class 
LibvirtConnection(driver.ComputeDriver): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - utils.execute('sudo', 'chown', os.getuid(), console_log) + utils.execute('chown', os.getuid(), console_log, run_as_root=True) if FLAGS.libvirt_type == 'xen': # Xen is special @@ -914,7 +915,7 @@ class LibvirtConnection(driver.ComputeDriver): ' data into image %(img_id)s (%(e)s)') % locals()) if FLAGS.libvirt_type == 'uml': - utils.execute('sudo', 'chown', 'root', basepath('disk')) + utils.execute('chown', 'root', basepath('disk'), run_as_root=True) root_mount_device = 'vda' # FIXME for now. it's hard coded. local_mount_device = 'vdb' # FIXME for now. it's hard coded. diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 711b05bae..e243d4fa0 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -103,16 +103,18 @@ class LibvirtOpenVswitchDriver(VIFDriver): dev = "tap-%s" % vif_id iface_id = "nova-" + vif_id if not linux_net._device_exists(dev): - utils.execute('sudo', 'ip', 'tuntap', 'add', dev, 'mode', 'tap') - utils.execute('sudo', 'ip', 'link', 'set', dev, 'up') - utils.execute('sudo', 'ovs-vsctl', '--', '--may-exist', 'add-port', + utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap', + run_as_root=True) + utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True) + utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port', FLAGS.libvirt_ovs_bridge, dev, '--', 'set', 'Interface', dev, "external-ids:iface-id=%s" % iface_id, '--', 'set', 'Interface', dev, "external-ids:iface-status=active", '--', 'set', 'Interface', dev, - "external-ids:attached-mac=%s" % mapping['mac']) + "external-ids:attached-mac=%s" % mapping['mac'], + run_as_root=True) result = { 'script': '', @@ -126,9 +128,9 @@ class LibvirtOpenVswitchDriver(VIFDriver): vif_id = str(instance['id']) + "-" + str(network['id']) dev = "tap-%s" % vif_id try: - utils.execute('sudo', 'ovs-vsctl', 'del-port', - network['bridge'], dev) - 
utils.execute('sudo', 'ip', 'link', 'delete', dev) + utils.execute('ovs-vsctl', 'del-port', + network['bridge'], dev, run_as_root=True) + utils.execute('ip', 'link', 'delete', dev, run_as_root=True) except exception.ProcessExecutionError: LOG.warning(_("Failed while unplugging vif of instance '%s'"), instance['name']) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6d2340ccd..7b79e129c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -967,7 +967,7 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) - utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) + utils.execute('chown', os.getuid(), '/dev/%s' % dev, run_as_root=True) with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) @@ -986,10 +986,11 @@ def _write_partition(virtual_size, dev): def execute(*cmd, **kwargs): return utils.execute(*cmd, **kwargs) - execute('sudo', 'parted', '--script', dest, 'mklabel', 'msdos') - execute('sudo', 'parted', '--script', dest, 'mkpart', 'primary', + execute('parted', '--script', dest, 'mklabel', 'msdos', run_as_root=True) + execute('parted', '--script', dest, 'mkpart', 'primary', '%ds' % primary_first, - '%ds' % primary_last) + '%ds' % primary_last, + run_as_root=True) LOG.debug(_('Writing partition table %s done.'), dest) @@ -1002,9 +1003,9 @@ def get_name_label_for_image(image): def _mount_filesystem(dev_path, dir): """mounts the device specified by dev_path in dir""" try: - out, err = utils.execute('sudo', 'mount', + out, err = utils.execute('mount', '-t', 'ext2,ext3', - dev_path, dir) + dev_path, dir, run_as_root=True) except exception.ProcessExecutionError as e: err = str(e) return err @@ -1056,7 +1057,7 @@ def _mounted_processing(device, key, net): disk.inject_data_into_fs(tmpdir, key, net, utils.execute) finally: - utils.execute('sudo', 'umount', dev_path) + utils.execute('umount', dev_path, run_as_root=True) else: 
LOG.info(_('Failed to mount filesystem (expected for ' 'non-linux instances): %s') % err) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 7821a4f7e..5d5eb824f 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -252,10 +252,10 @@ def _get_target(volume_id): volume_id) result = (None, None) try: - (r, _e) = utils.execute('sudo', 'iscsiadm', + (r, _e) = utils.execute('iscsiadm', '-m', 'discovery', '-t', 'sendtargets', - '-p', volume_ref['host']) + '-p', volume_ref['host'], run_as_root=True) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 23e845deb..c99534c07 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -65,14 +65,14 @@ class VolumeDriver(object): self._execute = execute self._sync_exec = sync_exec - def _try_execute(self, *command): + def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. 
tries = 0 while True: try: - self._execute(*command) + self._execute(*command, **kwargs) return True except exception.ProcessExecutionError: tries = tries + 1 @@ -84,24 +84,26 @@ class VolumeDriver(object): def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" - out, err = self._execute('sudo', 'vgs', '--noheadings', '-o', 'name') + out, err = self._execute('vgs', '--noheadings', '-o', 'name', + run_as_root=True) volume_groups = out.split() if not FLAGS.volume_group in volume_groups: raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) def _create_volume(self, volume_name, sizestr): - self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', - volume_name, FLAGS.volume_group) + self._try_execute('lvcreate', '-L', sizestr, '-n', + volume_name, FLAGS.volume_group, run_as_root=True) def _copy_volume(self, srcstr, deststr, size_in_g): - self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, - 'count=%d' % (size_in_g * 1024), 'bs=1M') + self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M', + run_as_root=True) def _volume_not_present(self, volume_name): path_name = '%s/%s' % (FLAGS.volume_group, volume_name) try: - self._try_execute('sudo', 'lvdisplay', path_name) + self._try_execute('lvdisplay', path_name, run_as_root=True) except Exception as e: # If the volume isn't present return True @@ -112,9 +114,10 @@ class VolumeDriver(object): # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) - self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % + self._try_execute('lvremove', '-f', "%s/%s" % (FLAGS.volume_group, - self._escape_snapshot(volume['name']))) + self._escape_snapshot(volume['name'])), + run_as_root=True) def _sizestr(self, size_in_g): if int(size_in_g) == 0: @@ -147,10 +150,11 @@ class VolumeDriver(object): # 
TODO(yamahata): lvm can't delete origin volume only without # deleting derived snapshots. Can we do something fancy? - out, err = self._execute('sudo', 'lvdisplay', '--noheading', + out, err = self._execute('lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (FLAGS.volume_group, - volume['name'])) + volume['name']), + run_as_root=True) # fake_execute returns None resulting unit test error if out: out = out.strip() @@ -162,10 +166,10 @@ class VolumeDriver(object): def create_snapshot(self, snapshot): """Creates a snapshot.""" orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) - self._try_execute('sudo', 'lvcreate', '-L', + self._try_execute('lvcreate', '-L', self._sizestr(snapshot['volume_size']), '--name', self._escape_snapshot(snapshot['name']), - '--snapshot', orig_lv_name) + '--snapshot', orig_lv_name, run_as_root=True) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" @@ -233,13 +237,14 @@ class AOEDriver(VolumeDriver): blade_id) = self.db.volume_allocate_shelf_and_blade(context, volume['id']) self._try_execute( - 'sudo', 'vblade-persist', 'setup', + 'vblade-persist', 'setup', shelf_id, blade_id, FLAGS.aoe_eth_dev, "/dev/%s/%s" % (FLAGS.volume_group, - volume['name'])) + volume['name']), + run_as_root=True) # NOTE(vish): The standard _try_execute does not work here # because these methods throw errors if other # volumes on this host are in the process of @@ -248,28 +253,29 @@ class AOEDriver(VolumeDriver): # just wait a bit for the current volume to # be ready and ignore any errors. 
time.sleep(2) - self._execute('sudo', 'vblade-persist', 'auto', 'all', - check_exit_code=False) - self._execute('sudo', 'vblade-persist', 'start', 'all', - check_exit_code=False) + self._execute('vblade-persist', 'auto', 'all', + check_exit_code=False, run_as_root=True) + self._execute('vblade-persist', 'start', 'all', + check_exit_code=False, run_as_root=True) def remove_export(self, context, volume): """Removes an export for a logical volume.""" (shelf_id, blade_id) = self.db.volume_get_shelf_and_blade(context, volume['id']) - self._try_execute('sudo', 'vblade-persist', 'stop', - shelf_id, blade_id) - self._try_execute('sudo', 'vblade-persist', 'destroy', - shelf_id, blade_id) + self._try_execute('vblade-persist', 'stop', + shelf_id, blade_id, run_as_root=True) + self._try_execute('vblade-persist', 'destroy', + shelf_id, blade_id, run_as_root=True) def discover_volume(self, context, _volume): """Discover volume on a remote host.""" (shelf_id, blade_id) = self.db.volume_get_shelf_and_blade(context, _volume['id']) - self._execute('sudo', 'aoe-discover') - out, err = self._execute('sudo', 'aoe-stat', check_exit_code=False) + self._execute('aoe-discover', run_as_root=True) + out, err = self._execute('aoe-stat', check_exit_code=False, + run_as_root=True) device_path = 'e%(shelf_id)d.%(blade_id)d' % locals() if out.find(device_path) >= 0: return "/dev/etherd/%s" % device_path @@ -285,8 +291,8 @@ class AOEDriver(VolumeDriver): (shelf_id, blade_id) = self.db.volume_get_shelf_and_blade(context, volume_id) - cmd = ('sudo', 'vblade-persist', 'ls', '--no-header') - out, _err = self._execute(*cmd) + cmd = ('vblade-persist', 'ls', '--no-header') + out, _err = self._execute(*cmd, run_as_root=True) exported = False for line in out.split('\n'): param = line.split(' ') @@ -348,16 +354,18 @@ class ISCSIDriver(VolumeDriver): iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - self._sync_exec('sudo', 
'ietadm', '--op', 'new', + self._sync_exec('ietadm', '--op', 'new', "--tid=%s" % iscsi_target, '--params', "Name=%s" % iscsi_name, + run_as_root=True, check_exit_code=False) - self._sync_exec('sudo', 'ietadm', '--op', 'new', + self._sync_exec('ietadm', '--op', 'new', "--tid=%s" % iscsi_target, '--lun=0', '--params', "Path=%s,Type=fileio" % volume_path, + run_as_root=True, check_exit_code=False) def _ensure_iscsi_targets(self, context, host): @@ -378,13 +386,13 @@ class ISCSIDriver(VolumeDriver): volume['host']) iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) - self._execute('sudo', 'ietadm', '--op', 'new', + self._execute('ietadm', '--op', 'new', '--tid=%s' % iscsi_target, - '--params', 'Name=%s' % iscsi_name) - self._execute('sudo', 'ietadm', '--op', 'new', + '--params', 'Name=%s' % iscsi_name, run_as_root=True) + self._execute('ietadm', '--op', 'new', '--tid=%s' % iscsi_target, '--lun=0', '--params', - 'Path=%s,Type=fileio' % volume_path) + 'Path=%s,Type=fileio' % volume_path, run_as_root=True) def remove_export(self, context, volume): """Removes an export for a logical volume.""" @@ -399,18 +407,18 @@ class ISCSIDriver(VolumeDriver): try: # ietadm show will exit with an error # this export has already been removed - self._execute('sudo', 'ietadm', '--op', 'show', - '--tid=%s' % iscsi_target) + self._execute('ietadm', '--op', 'show', + '--tid=%s' % iscsi_target, run_as_root=True) except Exception as e: LOG.info(_("Skipping remove_export. 
No iscsi_target " + "is presently exported for volume: %d"), volume['id']) return - self._execute('sudo', 'ietadm', '--op', 'delete', + self._execute('ietadm', '--op', 'delete', '--tid=%s' % iscsi_target, - '--lun=0') - self._execute('sudo', 'ietadm', '--op', 'delete', - '--tid=%s' % iscsi_target) + '--lun=0', run_as_root=True) + self._execute('ietadm', '--op', 'delete', + '--tid=%s' % iscsi_target, run_as_root=True) def _do_iscsi_discovery(self, volume): #TODO(justinsb): Deprecate discovery and use stored info @@ -419,8 +427,9 @@ class ISCSIDriver(VolumeDriver): volume_name = volume['name'] - (out, _err) = self._execute('sudo', 'iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', volume['host']) + (out, _err) = self._execute('iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', volume['host'], + run_as_root=True) for target in out.splitlines(): if FLAGS.iscsi_ip_prefix in target and volume_name in target: return target @@ -483,10 +492,10 @@ class ISCSIDriver(VolumeDriver): return properties def _run_iscsiadm(self, iscsi_properties, iscsi_command): - (out, err) = self._execute('sudo', 'iscsiadm', '-m', 'node', '-T', + (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', iscsi_properties['target_iqn'], '-p', iscsi_properties['target_portal'], - iscsi_command) + iscsi_command, run_as_root=True) LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % (iscsi_command, out, err)) return (out, err) @@ -560,8 +569,8 @@ class ISCSIDriver(VolumeDriver): tid = self.db.volume_get_iscsi_target_num(context, volume_id) try: - self._execute('sudo', 'ietadm', '--op', 'show', - '--tid=%(tid)d' % locals()) + self._execute('ietadm', '--op', 'show', + '--tid=%(tid)d' % locals(), run_as_root=True) except exception.ProcessExecutionError, e: # Instances remount read-only in this case. 
# /etc/init.d/iscsitarget restart and rebooting nova-volume -- cgit From 2fe0c5fe95487df8827db10f38065e3c8ac3800f Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Fri, 5 Aug 2011 12:09:46 -0700 Subject: Fixed review comments --- nova/api/openstack/contrib/security_groups.py | 101 ++++++++------------- .../api/openstack/contrib/test_security_groups.py | 8 +- 2 files changed, 44 insertions(+), 65 deletions(-) diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py index 7da046b8f..e2fed7965 100644 --- a/nova/api/openstack/contrib/security_groups.py +++ b/nova/api/openstack/contrib/security_groups.py @@ -45,33 +45,33 @@ class SecurityGroupController(object): super(SecurityGroupController, self).__init__() def _format_security_group_rule(self, context, rule): - r = {} - r['id'] = rule.id - r['parent_group_id'] = rule.parent_group_id - r['ip_protocol'] = rule.protocol - r['from_port'] = rule.from_port - r['to_port'] = rule.to_port - r['group'] = {} - r['ip_range'] = {} + sg_rule = {} + sg_rule['id'] = rule.id + sg_rule['parent_group_id'] = rule.parent_group_id + sg_rule['ip_protocol'] = rule.protocol + sg_rule['from_port'] = rule.from_port + sg_rule['to_port'] = rule.to_port + sg_rule['group'] = {} + sg_rule['ip_range'] = {} if rule.group_id: source_group = db.security_group_get(context, rule.group_id) - r['group'] = {'name': source_group.name, + sg_rule['group'] = {'name': source_group.name, 'tenant_id': source_group.project_id} else: - r['ip_range'] = {'cidr': rule.cidr} - return r + sg_rule['ip_range'] = {'cidr': rule.cidr} + return sg_rule def _format_security_group(self, context, group): - g = {} - g['id'] = group.id - g['description'] = group.description - g['name'] = group.name - g['tenant_id'] = group.project_id - g['rules'] = [] + security_group = {} + security_group['id'] = group.id + security_group['description'] = group.description + security_group['name'] = group.name + security_group['tenant_id'] = 
group.project_id + security_group['rules'] = [] for rule in group.rules: - r = self._format_security_group_rule(context, rule) - g['rules'] += [r] - return g + security_group['rules'] += [self._format_security_group_rule( + context, rule)] + return security_group def show(self, req, id): """Return data about the given security group.""" @@ -97,7 +97,7 @@ class SecurityGroupController(object): except ValueError: msg = _("Security group id is not integer") return exc.HTTPBadRequest(explanation=msg) - except exception.NotFound as exp: + except exception.SecurityGroupNotFound as exp: return exc.HTTPNotFound(explanation=unicode(exp)) LOG.audit(_("Delete security group %s"), id, context=context) @@ -138,8 +138,9 @@ class SecurityGroupController(object): group_name = security_group.get('name', None) group_description = security_group.get('description', None) - self._validate_security_group_name(group_name) - self._validate_security_group_description(group_description) + self._validate_security_group_property(group_name, "name") + self._validate_security_group_property(group_description, + "description") group_name = group_name.strip() group_description = group_description.strip() @@ -158,40 +159,21 @@ class SecurityGroupController(object): return {'security_group': self._format_security_group(context, group_ref)} - def _validate_security_group_name(self, value): - if value is None: - msg = _("Security group name is mandatory") - raise exc.HTTPBadRequest(explanation=msg) - - if not isinstance(value, basestring): - msg = _("Security group name is not a string or unicode") - raise exc.HTTPBadRequest(explanation=msg) - - if value.strip() == '': - msg = _("Security group name is an empty string") - raise exc.HTTPBadRequest(explanation=msg) - - if len(value.strip()) > 255: - msg = _("Security group name should not be greater " - "than 255 characters") - raise exc.HTTPBadRequest(explanation=msg) - - def _validate_security_group_description(self, value): - if value is None: - msg 
= _("Security group description is mandatory") - raise exc.HTTPBadRequest(explanation=msg) - - if not isinstance(value, basestring): - msg = _("Security group description is not a string or unicode") + def _validate_security_group_property(self, value, typ): + """ typ will be either 'name' or 'description', + depending on the caller + """ + try: + val = value.strip() + except AttributeError: + msg = _("Security group %s is not a string or unicode") % typ raise exc.HTTPBadRequest(explanation=msg) - - if value.strip() == '': - msg = _("Security group description is an empty string") + if not val: + msg = _("Security group %s cannot be empty.") % typ raise exc.HTTPBadRequest(explanation=msg) - - if len(value.strip()) > 255: - msg = _("Security group description should not be " - "greater than 255 characters") + if len(val) > 255: + msg = _("Security group %s should not be greater " + "than 255 characters.") % typ raise exc.HTTPBadRequest(explanation=msg) @@ -220,7 +202,7 @@ class SecurityGroupRulesController(SecurityGroupController): msg = _("Security group (%s) not found") % parent_group_id return exc.HTTPNotFound(explanation=msg) - msg = "Authorize security group ingress %s" + msg = _("Authorize security group ingress %s") LOG.audit(_(msg), security_group['name'], context=context) try: @@ -315,7 +297,7 @@ class SecurityGroupRulesController(SecurityGroupController): if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) if ((min(from_port, to_port) < -1) or - (max(from_port, to_port) > 65535)): + (max(from_port, to_port) > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port) @@ -345,17 +327,14 @@ class SecurityGroupRulesController(SecurityGroupController): group_id = rule.parent_group_id self.compute_api.ensure_default_security_group(context) - security_group = db.security_group_get(context, group_id) - if not security_group: - raise 
exception.SecurityGroupNotFound(security_group_id=group_id) msg = _("Revoke security group ingress %s") LOG.audit(_(msg), security_group['name'], context=context) db.security_group_rule_destroy(context, rule['id']) self.compute_api.trigger_security_group_rules_refresh(context, - security_group_id=security_group['id']) + security_group_id=security_group['id']) return exc.HTTPAccepted() diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py index eaef413a7..9a86a60f8 100644 --- a/nova/tests/api/openstack/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -73,10 +73,10 @@ class TestSecurityGroups(test.TestCase): sg = body_dict['security_group'] body_parts = [] body_parts.extend([ - '', - '' % (sg['name'])]) + '', + '' % (sg['name'])]) if 'description' in sg: body_parts.append('%s' % sg['description']) -- cgit From 2a329ff0734bc4413723322e289a0ac486ed7e2f Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Fri, 5 Aug 2011 12:43:27 -0700 Subject: Fixed localization review comment --- nova/api/openstack/contrib/security_groups.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py index e2fed7965..d3a8e21b8 100644 --- a/nova/api/openstack/contrib/security_groups.py +++ b/nova/api/openstack/contrib/security_groups.py @@ -203,7 +203,7 @@ class SecurityGroupRulesController(SecurityGroupController): return exc.HTTPNotFound(explanation=msg) msg = _("Authorize security group ingress %s") - LOG.audit(_(msg), security_group['name'], context=context) + LOG.audit(msg, security_group['name'], context=context) try: values = self._rule_args_to_dict(context, @@ -330,7 +330,7 @@ class SecurityGroupRulesController(SecurityGroupController): security_group = db.security_group_get(context, group_id) msg = _("Revoke security group ingress %s") - LOG.audit(_(msg), 
security_group['name'], context=context) + LOG.audit(msg, security_group['name'], context=context) db.security_group_rule_destroy(context, rule['id']) self.compute_api.trigger_security_group_rules_refresh(context, -- cgit From 4c7a33ee907ae7abcff020c75dde8ed320fe7aeb Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 8 Aug 2011 15:30:44 -0400 Subject: assert that vmops.revert_migration is called --- nova/tests/test_xenapi.py | 25 ++++++++++++++++++------- nova/tests/xenapi/stubs.py | 4 ---- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 8e3c88f8e..34aba140b 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -637,6 +637,24 @@ class XenAPIVMTestCase(test.TestCase): # Ensure that it will not unrescue a non-rescued instance. self.assertRaises(Exception, conn.unrescue, instance, None) + def test_revert_migration(self): + instance = self._create_instance() + + class VMOpsMock(): + + def __init__(self): + self.revert_migration_called = False + + def revert_migration(self, instance): + self.revert_migration_called = True + + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + + conn = xenapi_conn.get_connection(False) + conn._vmops = VMOpsMock() + conn.revert_migration(instance) + self.assertTrue(conn._vmops.revert_migration_called) + def _create_instance(self, instance_id=1, spawn=True): """Creates and spawns a test instance.""" stubs.stubout_loopingcall_start(self.stubs) @@ -669,13 +687,6 @@ class XenAPIVMTestCase(test.TestCase): self.conn.spawn(instance, network_info) return instance - def test_revert_migration(self): - instance = self._create_instance() - stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) - stubs.stubout_loopingcall_start(self.stubs) - conn = xenapi_conn.get_connection(False) - conn.revert_migration(instance) - class XenAPIDiffieHellmanTestCase(test.TestCase): """Unit tests for Diffie-Hellman code.""" diff 
--git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 195e4d038..66c79d465 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -230,14 +230,10 @@ def stub_out_vm_methods(stubs): def fake_spawn_rescue(self, inst): inst._rescue = False - def fake_revert_migration(self, inst): - pass - stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown) stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock) stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock) stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue) - stubs.Set(vmops.VMOps, "revert_migration", fake_revert_migration) class FakeSessionForVolumeTests(fake.SessionBase): -- cgit From 1fc7164ecaa42c33ff510ec8ef3ca92f183c96e3 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Mon, 8 Aug 2011 15:37:23 -0400 Subject: remove obsolete script from setup.py --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 8121e250b..118ac1abd 100644 --- a/setup.py +++ b/setup.py @@ -123,7 +123,6 @@ setup(name='nova', 'bin/nova-console', 'bin/nova-dhcpbridge', 'bin/nova-direct-api', - 'bin/nova-import-canonical-imagestore', 'bin/nova-logspool', 'bin/nova-manage', 'bin/nova-network', -- cgit From 2a8cff40af58d6d2b2fc3a818816eb2a913cccfb Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 9 Aug 2011 11:22:32 +0100 Subject: Fix ajaxterm's use of shell=True, prevent vmops.py from running its own version of utils.execute --- nova/utils.py | 4 ---- nova/virt/libvirt/connection.py | 6 +++--- nova/virt/xenapi/vmops.py | 26 ++++++++------------------ 3 files changed, 11 insertions(+), 25 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index 0820a2bde..7f27c5fd5 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -138,8 +138,6 @@ def execute(*cmd, **kwargs): :delay_on_retry True | False. Defaults to True. If set to True, wait a short amount of time before retrying. :attempts How many times to retry cmd. - :shell True | False. Defaults to False. 
If set to True, - Popen command is called shell=True. :run_as_root True | False. Defaults to False. If set to True, the command is prefixed by the command specified in the sudo_helper FLAG. @@ -153,7 +151,6 @@ def execute(*cmd, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) - shell = kwargs.pop('shell', False) run_as_root = kwargs.pop('run_as_root', False) if len(kwargs): raise exception.Error(_('Got unknown keyword args ' @@ -175,7 +172,6 @@ def execute(*cmd, **kwargs): stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, - shell=shell, env=env) result = None if process_input is not None: diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 5053daf8f..a01fc3317 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -681,10 +681,10 @@ class LibvirtConnection(driver.ComputeDriver): ajaxterm_cmd = 'sudo socat - %s' \ % get_pty_for_instance(instance['name']) - cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \ - % (utils.novadir(), ajaxterm_cmd, token, port) + cmd = ['%s/tools/ajaxterm/ajaxterm.py' % utils.novadir(), + '--command', ajaxterm_cmd, '-t', token, '-p', port] - utils.execute(cmd, shell=True) + utils.execute(cmd) return {'token': token, 'host': host, 'port': port} def get_host_ip_addr(self): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a78413370..230ecd560 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1320,12 +1320,6 @@ class VMOps(object): ######################################################################## -def _runproc(cmd): - pipe = subprocess.PIPE - return subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, - stderr=pipe, close_fds=True) - - class SimpleDH(object): """ This class wraps all the functionality needed to implement @@ -1382,22 +1376,18 @@ class SimpleDH(object): mpi = M2Crypto.m2.bn_to_mpi(bn) return mpi 
- def _run_ssl(self, text, extra_args=None): - if not extra_args: - extra_args = '' - cmd = 'enc -aes-128-cbc -A -a -pass pass:%s -nosalt %s' % ( - self._shared, extra_args) - proc = _runproc('openssl %s' % cmd) - proc.stdin.write(text) - proc.stdin.close() - proc.wait() - err = proc.stderr.read() + def _run_ssl(self, text, decrypt=False): + cmd = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass', + 'pass:%s' % self._shared, '-nosalt'] + if decrypt: + cmd.append('-d') + out, err = utils.execute(*cmd, process_input=text) if err: raise RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read() + return out def encrypt(self, text): return self._run_ssl(text).strip('\n') def decrypt(self, text): - return self._run_ssl(text, '-d') + return self._run_ssl(text, decrypt=True) -- cgit From ed9ea0848e4c8e8220a7f3bc175d7855c88c84d0 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 9 Aug 2011 12:05:05 +0100 Subject: Use close_fds by default since it's good for you --- nova/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/utils.py b/nova/utils.py index 7f27c5fd5..d691481f8 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -172,6 +172,7 @@ def execute(*cmd, **kwargs): stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, + close_fds=True, env=env) result = None if process_input is not None: -- cgit From 7e810e1f266daaa63167ea8412dc0416e88f688f Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 9 Aug 2011 12:21:28 +0100 Subject: Fix usage of sudo -E and addl_env in dnsmasq/radvd calls, remove addl_env support, fix fake_execute allowed kwargs --- nova/network/linux_net.py | 63 +++++++++++++++++++---------------------------- nova/tests/fake_utils.py | 12 ++++++--- nova/utils.py | 8 +----- 3 files changed, 35 insertions(+), 48 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 3c62c75cd..a8af4af57 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -617,11 +617,26 @@ def update_dhcp(context, 
network_ref): else: LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) - # FLAGFILE and DNSMASQ_INTERFACE in env - env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, - 'DNSMASQ_INTERFACE': network_ref['bridge']} - command = _dnsmasq_cmd(network_ref) - _execute(*command, addl_env=env) + cmd = ['FLAGFILE="%s"' % FLAGS.dhcpbridge_flagfile, + 'DNSMASQ_INTERFACE="%s"' % network_ref['bridge'], + 'dnsmasq', + '--strict-order', + '--bind-interfaces', + '--interface=%s' % network_ref['bridge'], + '--conf-file=%s' % FLAGS.dnsmasq_config_file, + '--domain=%s' % FLAGS.dhcp_domain, + '--pid-file=%s' % _dhcp_file(network_ref['bridge'], 'pid'), + '--listen-address=%s' % network_ref['dhcp_server'], + '--except-interface=lo', + '--dhcp-range=%s,static,120s' % network_ref['dhcp_start'], + '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])), + '--dhcp-hostsfile=%s' % _dhcp_file(network_ref['bridge'], 'conf'), + '--dhcp-script=%s' % FLAGS.dhcpbridge, + '--leasefile-ro'] + if FLAGS.dns_server: + cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server] + + _execute(*cmd, run_as_root=True) @utils.synchronized('radvd_start') @@ -659,8 +674,12 @@ interface %s LOG.debug(_('killing radvd threw %s'), exc) else: LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) - command = _ra_cmd(network_ref) - _execute(*command) + + cmd = ['radvd', + '-C', '%s' % _ra_file(network_ref['bridge'], 'conf'), + '-p', '%s' % _ra_file(network_ref['bridge'], 'pid')] + + _execute(*cmd, run_as_root=True) def _host_lease(fixed_ip_ref): @@ -704,36 +723,6 @@ def _device_exists(device): return not err -def _dnsmasq_cmd(net): - """Builds dnsmasq command.""" - cmd = ['sudo', '-E', 'dnsmasq', - '--strict-order', - '--bind-interfaces', - '--interface=%s' % net['bridge'], - '--conf-file=%s' % FLAGS.dnsmasq_config_file, - '--domain=%s' % FLAGS.dhcp_domain, - '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), - '--listen-address=%s' % net['dhcp_server'], - '--except-interface=lo', - 
'--dhcp-range=%s,static,120s' % net['dhcp_start'], - '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])), - '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), - '--dhcp-script=%s' % FLAGS.dhcpbridge, - '--leasefile-ro'] - if FLAGS.dns_server: - cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server] - return cmd - - -def _ra_cmd(net): - """Builds radvd command.""" - cmd = ['sudo', '-E', 'radvd', -# '-u', 'nobody', - '-C', '%s' % _ra_file(net['bridge'], 'conf'), - '-p', '%s' % _ra_file(net['bridge'], 'pid')] - return cmd - - def _stop_dnsmasq(network): """Stops the dnsmasq instance for a given network.""" pid = _dnsmasq_pid_for(network) diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py index be59970c9..72f79921e 100644 --- a/nova/tests/fake_utils.py +++ b/nova/tests/fake_utils.py @@ -63,9 +63,11 @@ def fake_execute(*cmd_parts, **kwargs): """ global _fake_execute_repliers - process_input = kwargs.get('process_input', None) - addl_env = kwargs.get('addl_env', None) - check_exit_code = kwargs.get('check_exit_code', 0) + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', 0) + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str) @@ -87,7 +89,9 @@ def fake_execute(*cmd_parts, **kwargs): # Alternative is a function, so call it reply = reply_handler(cmd_parts, process_input=process_input, - addl_env=addl_env, + delay_on_retry=delay_on_retry, + attempts=attempts, + run_as_root=run_as_root, check_exit_code=check_exit_code) except exception.ProcessExecutionError as e: LOG.debug(_('Faked command raised an exception %s' % str(e))) diff --git a/nova/utils.py b/nova/utils.py index d691481f8..dc81abdb9 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -132,7 +132,6 @@ def execute(*cmd, **kwargs): :cmd 
Passed to subprocess.Popen. :process_input Send to opened process. - :addl_env Added to the processes env. :check_exit_code Defaults to 0. Raise exception.ProcessExecutionError unless program exits with this code. :delay_on_retry True | False. Defaults to True. If set to True, wait a @@ -147,7 +146,6 @@ def execute(*cmd, **kwargs): """ process_input = kwargs.pop('process_input', None) - addl_env = kwargs.pop('addl_env', None) check_exit_code = kwargs.pop('check_exit_code', 0) delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) @@ -164,16 +162,12 @@ def execute(*cmd, **kwargs): attempts -= 1 try: LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) - env = os.environ.copy() - if addl_env: - env.update(addl_env) _PIPE = subprocess.PIPE # pylint: disable=E1101 obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, - close_fds=True, - env=env) + close_fds=True) result = None if process_input is not None: result = obj.communicate(process_input) -- cgit From 446fb79eb0025ce50cac9fc5496d4e4840134bce Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 9 Aug 2011 13:30:06 +0100 Subject: Command args can be a tuple, convert them to list --- nova/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/utils.py b/nova/utils.py index dc81abdb9..80044e37d 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -155,7 +155,7 @@ def execute(*cmd, **kwargs): 'to utils.execute: %r') % kwargs) if run_as_root: - cmd = shlex.split(FLAGS.sudo_helper) + cmd + cmd = shlex.split(FLAGS.sudo_helper) + list(cmd) cmd = map(str, cmd) while attempts > 0: -- cgit From 142f072cc3416f88dd0af757b17d7ddd473c5e20 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 9 Aug 2011 13:34:54 +0100 Subject: Remove old commented line --- nova/network/linux_net.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index a8af4af57..8b7b59f55 100644 --- 
a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -492,7 +492,6 @@ def ensure_bridge(bridge, interface, net_attrs=None): LOG.debug(_('Starting Bridge interface for %s'), interface) _execute('brctl', 'addbr', bridge, run_as_root=True) _execute('brctl', 'setfd', bridge, 0, run_as_root=True) - # _execute('sudo brctl setageing %s 10' % bridge) _execute('brctl', 'stp', bridge, 'off', run_as_root=True) _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True) if net_attrs: -- cgit From 238122f55fe70eedcd670714d80e6d2c10c4f3ab Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 9 Aug 2011 13:47:55 +0100 Subject: Minor fix to reduce diff --- nova/tests/fake_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py index 72f79921e..84ab641ea 100644 --- a/nova/tests/fake_utils.py +++ b/nova/tests/fake_utils.py @@ -63,11 +63,11 @@ def fake_execute(*cmd_parts, **kwargs): """ global _fake_execute_repliers - process_input = kwargs.pop('process_input', None) - check_exit_code = kwargs.pop('check_exit_code', 0) - delay_on_retry = kwargs.pop('delay_on_retry', True) - attempts = kwargs.pop('attempts', 1) - run_as_root = kwargs.pop('run_as_root', False) + process_input = kwargs.get('process_input', None) + check_exit_code = kwargs.get('check_exit_code', 0) + delay_on_retry = kwargs.get('delay_on_retry', True) + attempts = kwargs.get('attempts', 1) + run_as_root = kwargs.get('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str) -- cgit From c1ac67222e18c5fe74a52aa864231d5311374816 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Tue, 9 Aug 2011 14:11:15 +0100 Subject: Rename sudo_helper FLAG into root_helper --- nova/flags.py | 2 +- nova/utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e30c0d6be..35209aa24 100644 --- 
a/nova/flags.py +++ b/nova/flags.py @@ -393,5 +393,5 @@ DEFINE_bool('start_guests_on_host_boot', False, DEFINE_bool('resume_guests_state_on_host_boot', False, 'Whether to start guests, that was running before the host reboot') -DEFINE_string('sudo_helper', 'sudo', +DEFINE_string('root_helper', 'sudo', 'Command prefix to use for running commands as root') diff --git a/nova/utils.py b/nova/utils.py index 80044e37d..46c329c85 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -139,7 +139,7 @@ def execute(*cmd, **kwargs): :attempts How many times to retry cmd. :run_as_root True | False. Defaults to False. If set to True, the command is prefixed by the command specified - in the sudo_helper FLAG. + in the root_helper FLAG. :raises exception.Error on receiving unknown arguments :raises exception.ProcessExecutionError @@ -155,7 +155,7 @@ def execute(*cmd, **kwargs): 'to utils.execute: %r') % kwargs) if run_as_root: - cmd = shlex.split(FLAGS.sudo_helper) + list(cmd) + cmd = shlex.split(FLAGS.root_helper) + list(cmd) cmd = map(str, cmd) while attempts > 0: -- cgit From d5bcc6764b418d7aa16b53b50173261385445ee8 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 9 Aug 2011 11:31:48 -0700 Subject: initial port --- nova/api/openstack/contrib/keypairs.py | 137 +++++++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 nova/api/openstack/contrib/keypairs.py diff --git a/nova/api/openstack/contrib/keypairs.py b/nova/api/openstack/contrib/keypairs.py new file mode 100644 index 000000000..879bd9712 --- /dev/null +++ b/nova/api/openstack/contrib/keypairs.py @@ -0,0 +1,137 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" Keypair management extension""" + +from webob import exc + +from nova import db +from nova import crypto +from nova import exception +from nova.api.openstack import extensions + + +class KeypairController(object): + """ Keypair API controller for the Openstack API """ + + # TODO(ja): the keypair crud logic should be in nova.compute.API? + + def _gen_key(self): + """ + Generate a key + """ + # TODO(ja): crypto.generate_key_pair is currently a slow method + # and should probably be moved to a process pool? + private_key, public_key, fingerprint = crypto.generate_key_pair() + return {'private_key': private_key, + 'public_key': public_key, + 'fingerprint': fingerprint} + + def create(self, req, body): + """ + Create or import keypair. + + Sending key_name will generate a key and return private_key + and fingerprint. 
+ + You can send a public_key to add an existing ssh key + + params: keypair object with: + key_name (required) - string + fingerprint (optional) - string + public_key (optional) - string + """ + + context = req.environ['nova.context'] + params = body['keypair'] + key_name = params['key_name'] + + # NOTE(ja): generation is slow, so shortcut invalid key_name exception + try: + db.key_pair_get(context, context.user_id, key_name) + raise exception.KeyPairExists(key_name=key_name) + except exception.NotFound: + pass + + keypair = {'user_id': context.user_id, + 'name': key_name} + + # import if public_key is sent + if 'public_key' in params: + keypair['public_key'] = params['public_key'] + keypair['fingerprint'] = params.get('fingerprint', None) + else: + generated_key = self._gen_key() + keypair['private_key'] = generated_key['private_key'] + keypair['public_key'] = generated_key['public_key'] + keypair['fingerprint'] = generated_key['fingerprint'] + + db.key_pair_create(context, keypair) + return {'keypair': keypair} + + def delete(self, req, name): + """ + Delete a keypair with a given name + """ + context = req.environ['nova.context'] + db.key_pair_destroy(context, context.user_id, name) + return exc.HTTPAccepted() + + def index(self, req): + """ + List of keypairs for a user + """ + context = req.environ['nova.context'] + key_pairs = db.key_pair_get_all_by_user(context, context.user_id) + rval = [] + for key_pair in key_pairs: + rval.append({ + 'name': key_pair['name'], + 'key_name': key_pair['name'], + 'fingerprint': key_pair['fingerprint'], + }) + + return {'keypairs': rval} + + +class Keypairs(extensions.ExtensionDescriptor): + + def get_name(self): + return "Keypairs" + + def get_alias(self): + return "os-keypairs" + + def get_description(self): + return "Keypair Support" + + def get_namespace(self): + return \ + "http://docs.openstack.org/ext/keypairs/api/v1.1" + + def get_updated(self): + return "2011-08-08T00:00:00+00:00" + + def get_resources(self): + 
resources = [] + + res = extensions.ResourceExtension( + 'os-keypairs', + KeypairController()) + + resources.append(res) + return resources -- cgit From 057b6ad650013a952f88f6e02f3e3db0164084d1 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 9 Aug 2011 13:00:13 -0700 Subject: added tests - list doesn't pass due to unicode issues --- nova/api/openstack/contrib/keypairs.py | 26 +++--- nova/crypto.py | 10 ++- nova/tests/api/openstack/contrib/test_keypairs.py | 99 +++++++++++++++++++++++ 3 files changed, 122 insertions(+), 13 deletions(-) create mode 100644 nova/tests/api/openstack/contrib/test_keypairs.py diff --git a/nova/api/openstack/contrib/keypairs.py b/nova/api/openstack/contrib/keypairs.py index 879bd9712..8435494e1 100644 --- a/nova/api/openstack/contrib/keypairs.py +++ b/nova/api/openstack/contrib/keypairs.py @@ -23,19 +23,20 @@ from nova import db from nova import crypto from nova import exception from nova.api.openstack import extensions - +import os +import shutil +import tempfile class KeypairController(object): """ Keypair API controller for the Openstack API """ - # TODO(ja): the keypair crud logic should be in nova.compute.API? + # TODO(ja): both this file and nova.api.ec2.cloud.py have similar logic. + # move the common keypair logic to nova.compute.API? def _gen_key(self): """ Generate a key """ - # TODO(ja): crypto.generate_key_pair is currently a slow method - # and should probably be moved to a process pool? 
private_key, public_key, fingerprint = crypto.generate_key_pair() return {'private_key': private_key, 'public_key': public_key, @@ -52,7 +53,6 @@ class KeypairController(object): params: keypair object with: key_name (required) - string - fingerprint (optional) - string public_key (optional) - string """ @@ -72,8 +72,14 @@ class KeypairController(object): # import if public_key is sent if 'public_key' in params: + tmpdir = tempfile.mkdtemp() + fn = os.path.join(tmpdir, 'import.pub') + with open(fn, 'w') as pub: + pub.write(params['public_key']) + fingerprint = crypto.generate_fingerprint(fn) + shutil.rmtree(tmpdir) keypair['public_key'] = params['public_key'] - keypair['fingerprint'] = params.get('fingerprint', None) + keypair['fingerprint'] = fingerprint else: generated_key = self._gen_key() keypair['private_key'] = generated_key['private_key'] @@ -83,12 +89,12 @@ class KeypairController(object): db.key_pair_create(context, keypair) return {'keypair': keypair} - def delete(self, req, name): + def delete(self, req, id): """ Delete a keypair with a given name """ context = req.environ['nova.context'] - db.key_pair_destroy(context, context.user_id, name) + db.key_pair_destroy(context, context.user_id, id) return exc.HTTPAccepted() def index(self, req): @@ -99,11 +105,11 @@ class KeypairController(object): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) rval = [] for key_pair in key_pairs: - rval.append({ + rval.append({'keypair': { 'name': key_pair['name'], 'key_name': key_pair['name'], 'fingerprint': key_pair['fingerprint'], - }) + }}) return {'keypairs': rval} diff --git a/nova/crypto.py b/nova/crypto.py index 8d535f426..71bef80f2 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -104,6 +104,12 @@ def fetch_ca(project_id=None, chain=True): return buffer +def generate_fingerprint(public_key): + (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', public_key) + fingerprint = out.split(' ')[1] + return fingerprint + + def 
generate_key_pair(bits=1024): # what is the magic 65537? @@ -111,9 +117,7 @@ def generate_key_pair(bits=1024): keyfile = os.path.join(tmpdir, 'temp') utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '', '-f', keyfile) - (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', - '%s.pub' % (keyfile)) - fingerprint = out.split(' ')[1] + fingerprint = generate_fingerprint('%s.pub' % (keyfile)) private_key = open(keyfile).read() public_key = open(keyfile + '.pub').read() diff --git a/nova/tests/api/openstack/contrib/test_keypairs.py b/nova/tests/api/openstack/contrib/test_keypairs.py new file mode 100644 index 000000000..1f6028e08 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_keypairs.py @@ -0,0 +1,99 @@ +# Copyright 2011 Eldar Nugaev +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import webob + +from nova import context +from nova import db +from nova import test +from nova.api.openstack.contrib.keypairs import KeypairController +from nova.tests.api.openstack import fakes + + +def fake_keypair(name): + return {'public_key': 'FAKE_KEY', + 'fingerprint': 'FAKE_FINGERPRINT', + 'name': name} + +def db_key_pair_get_all_by_user(self, user_id): + return [fake_keypair('FAKE')] + + +def db_key_pair_create(self, keypair): + pass + + +def db_key_pair_destroy(context, user_id, key_name): + if not (user_id and key_name): + raise Exception() + + +class KeypairsTest(test.TestCase): + + def setUp(self): + super(KeypairsTest, self).setUp() + self.controller = KeypairController() + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + self.stubs.Set(db, "key_pair_get_all_by_user", + db_key_pair_get_all_by_user) + self.stubs.Set(db, "key_pair_create", + db_key_pair_create) + self.stubs.Set(db, "key_pair_destroy", + db_key_pair_destroy) + self.context = context.get_admin_context() + + def test_keypair_list(self): + req = webob.Request.blank('/v1.1/os-keypairs') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + response = {'keypairs': [{'keypair': fake_keypair('FAKE')}]} + self.assertEqual(res_dict, response) + + def test_keypair_create(self): + body = {'keypair': {'key_name': 'create_test'}} + req = webob.Request.blank('/v1.1/os-keypairs') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0) + self.assertTrue(len(res_dict['keypair']['private_key']) > 0) + + def test_keypair_import(self): + body = {'keypair': {'key_name': 'create_test', + 'public_key': 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznAx9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymiMZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7ljj5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwBbHkXa6OciiJDvkRzJXzf'}} + req = webob.Request.blank('/v1.1/os-keypairs') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + # FIXME(ja): sholud we check that public_key was sent to create? + res_dict = json.loads(res.body) + self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0) + self.assertFalse('private_key' in res_dict['keypair']) + + def test_keypair_delete(self): + req = webob.Request.blank('/v1.1/os-keypairs/FAKE') + req.method = 'DELETE' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + -- cgit From 568188c158db3caddfcd9ecb384189f93b076dd9 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 9 Aug 2011 14:23:30 -0700 Subject: add Keypairs to test_extensions --- nova/tests/api/openstack/test_extensions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 409fa0e71..eb9afbdb1 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -96,7 +96,7 @@ class ExtensionControllerTest(test.TestCase): names = [x['name'] for x in data['extensions']] names.sort() self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", - "Fox In Socks", "Hosts", "Multinic", "Volumes"]) + "Fox In Socks", "Hosts", "Keypairs", "Multinic", "Volumes"]) # Make sure that at least Fox in Sox is correct. 
(fox_ext,) = [ @@ -143,7 +143,7 @@ class ExtensionControllerTest(test.TestCase): # Make sure we have all the extensions. exts = root.findall('{0}extension'.format(NS)) - self.assertEqual(len(exts), 6) + self.assertEqual(len(exts), 7) # Make sure that at least Fox in Sox is correct. (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] -- cgit From 131b2185e23567895e3f87cdfe9c2822d18910b2 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 9 Aug 2011 16:12:59 -0700 Subject: tests pass --- nova/api/openstack/contrib/keypairs.py | 16 ++++++++-------- nova/tests/api/openstack/contrib/test_keypairs.py | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/contrib/keypairs.py b/nova/api/openstack/contrib/keypairs.py index 8435494e1..d718846f1 100644 --- a/nova/api/openstack/contrib/keypairs.py +++ b/nova/api/openstack/contrib/keypairs.py @@ -46,29 +46,29 @@ class KeypairController(object): """ Create or import keypair. - Sending key_name will generate a key and return private_key + Sending name will generate a key and return private_key and fingerprint. 
You can send a public_key to add an existing ssh key params: keypair object with: - key_name (required) - string + name (required) - string public_key (optional) - string """ context = req.environ['nova.context'] params = body['keypair'] - key_name = params['key_name'] + name = params['name'] - # NOTE(ja): generation is slow, so shortcut invalid key_name exception + # NOTE(ja): generation is slow, so shortcut invalid name exception try: - db.key_pair_get(context, context.user_id, key_name) - raise exception.KeyPairExists(key_name=key_name) + db.key_pair_get(context, context.user_id, name) + raise exception.KeyPairExists(key_name=name) except exception.NotFound: pass keypair = {'user_id': context.user_id, - 'name': key_name} + 'name': name} # import if public_key is sent if 'public_key' in params: @@ -107,7 +107,7 @@ class KeypairController(object): for key_pair in key_pairs: rval.append({'keypair': { 'name': key_pair['name'], - 'key_name': key_pair['name'], + 'public_key': key_pair['public_key'], 'fingerprint': key_pair['fingerprint'], }}) diff --git a/nova/tests/api/openstack/contrib/test_keypairs.py b/nova/tests/api/openstack/contrib/test_keypairs.py index 1f6028e08..c9dc34d65 100644 --- a/nova/tests/api/openstack/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/contrib/test_keypairs.py @@ -36,8 +36,8 @@ def db_key_pair_create(self, keypair): pass -def db_key_pair_destroy(context, user_id, key_name): - if not (user_id and key_name): +def db_key_pair_destroy(context, user_id, name): + if not (user_id and name): raise Exception() @@ -65,7 +65,7 @@ class KeypairsTest(test.TestCase): self.assertEqual(res_dict, response) def test_keypair_create(self): - body = {'keypair': {'key_name': 'create_test'}} + body = {'keypair': {'name': 'create_test'}} req = webob.Request.blank('/v1.1/os-keypairs') req.method = 'POST' req.body = json.dumps(body) @@ -77,7 +77,7 @@ class KeypairsTest(test.TestCase): self.assertTrue(len(res_dict['keypair']['private_key']) > 0) def 
test_keypair_import(self): - body = {'keypair': {'key_name': 'create_test', + body = {'keypair': {'name': 'create_test', 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznAx9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymiMZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7ljj5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwBbHkXa6OciiJDvkRzJXzf'}} req = webob.Request.blank('/v1.1/os-keypairs') req.method = 'POST' -- cgit From 8c6ebaf9861e3b652663c0ff60ad545864f72677 Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 9 Aug 2011 19:34:51 -0400 Subject: use correct variable name --- nova/db/sqlalchemy/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 1da54b3da..8119cdfb8 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3346,8 +3346,8 @@ def instance_metadata_update(context, instance_id, metadata, delete): item = {"value": meta_value} try: - meta_ref = instance_metadata_get_item( - context, instance_id, key, session) + meta_ref = instance_metadata_get_item(context, instance_id, + meta_key, session) except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() item.update({"key": meta_key, "instance_id": instance_id}) -- cgit From 83ed9faa488b3ec0f1cb16e7147293c912e2fc2b Mon Sep 17 00:00:00 2001 From: Matthew Hooker Date: Tue, 9 Aug 2011 19:35:40 -0400 Subject: pep8 fix --- nova/api/openstack/contrib/floating_ips.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 52c9c6cf9..2aba1068a 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -102,7 +102,7 @@ class FloatingIPController(object): def 
delete(self, req, id): context = req.environ['nova.context'] ip = self.network_api.get_floating_ip(context, id) - + if 'fixed_ip' in ip: try: self.disassociate(req, id, '') -- cgit From f73b6dc8e90b763da1fe86496fc6fd6a80b99f0a Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Tue, 9 Aug 2011 17:03:24 -0700 Subject: List security groups project wise for admin users same as other users --- nova/api/openstack/contrib/security_groups.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py index d3a8e21b8..1d70956dc 100644 --- a/nova/api/openstack/contrib/security_groups.py +++ b/nova/api/openstack/contrib/security_groups.py @@ -110,12 +110,8 @@ class SecurityGroupController(object): context = req.environ['nova.context'] self.compute_api.ensure_default_security_group(context) - if context.is_admin: - groups = db.security_group_get_all(context) - else: - groups = db.security_group_get_by_project(context, - context.project_id) - + groups = db.security_group_get_by_project(context, + context.project_id) limited_list = common.limited(groups, req) result = [self._format_security_group(context, group) for group in limited_list] -- cgit From 0719b038ba1548f98668991507c001eb18c82981 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 9 Aug 2011 17:22:01 -0700 Subject: import formatting - thx --- nova/api/openstack/contrib/keypairs.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/contrib/keypairs.py b/nova/api/openstack/contrib/keypairs.py index d718846f1..201648ab5 100644 --- a/nova/api/openstack/contrib/keypairs.py +++ b/nova/api/openstack/contrib/keypairs.py @@ -17,15 +17,17 @@ """ Keypair management extension""" +import os +import shutil +import tempfile + from webob import exc -from nova import db from nova import crypto +from nova import db from nova import exception from nova.api.openstack import 
extensions -import os -import shutil -import tempfile + class KeypairController(object): """ Keypair API controller for the Openstack API """ -- cgit From b3b13df03f0e843300f63bded410ffc4e0bd4e9f Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Wed, 10 Aug 2011 11:25:38 -0500 Subject: Removed verbose debugging output when capabilities are reported. --- nova/scheduler/zone_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 97bdf3d44..9d05ea42e 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -198,7 +198,7 @@ class ZoneManager(object): def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" logging.debug(_("Received %(service_name)s service update from " - "%(host)s: %(capabilities)s") % locals()) + "%(host)s.") % locals()) service_caps = self.service_states.get(host, {}) capabilities["timestamp"] = utils.utcnow() # Reported time service_caps[service_name] = capabilities -- cgit From a46964ad11a85effa833decb81384b478a0cf75d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 10 Aug 2011 13:21:36 -0400 Subject: Allows multiple MySQL connections to be maintained using eventlet's db_pool. 
--- nova/db/sqlalchemy/session.py | 92 ++++++++++++++++++++++++++++++++----------- nova/flags.py | 6 +++ 2 files changed, 74 insertions(+), 24 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 4a9a28f43..726a801af 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -19,37 +19,81 @@ Session Handling for SQLAlchemy backend """ -from sqlalchemy import create_engine -from sqlalchemy import pool -from sqlalchemy.orm import sessionmaker +import eventlet.patcher +eventlet.patcher.monkey_patch() -from nova import exception -from nova import flags +import eventlet.db_pool +import sqlalchemy.orm +import sqlalchemy.pool + +import nova.exception +import nova.flags +import nova.log + + +FLAGS = nova.flags.FLAGS +LOG = nova.log.getLogger("nova.db.sqlalchemy") + + +try: + import MySQLdb +except ImportError: + LOG.debug(_("Unable to load MySQLdb module.")) -FLAGS = flags.FLAGS _ENGINE = None _MAKER = None def get_session(autocommit=True, expire_on_commit=False): - """Helper method to grab session""" - global _ENGINE - global _MAKER - if not _MAKER: - if not _ENGINE: - kwargs = {'pool_recycle': FLAGS.sql_idle_timeout, - 'echo': False} - - if FLAGS.sql_connection.startswith('sqlite'): - kwargs['poolclass'] = pool.NullPool - - _ENGINE = create_engine(FLAGS.sql_connection, - **kwargs) - _MAKER = (sessionmaker(bind=_ENGINE, - autocommit=autocommit, - expire_on_commit=expire_on_commit)) + """Return a SQLAlchemy session.""" + global _ENGINE, _MAKER + + if _MAKER is None or _ENGINE is None: + _ENGINE = get_engine() + _MAKER = get_maker(_ENGINE, autocommit, expire_on_commit) + session = _MAKER() - session.query = exception.wrap_db_error(session.query) - session.flush = exception.wrap_db_error(session.flush) + session.query = nova.exception.wrap_db_error(session.query) + session.flush = nova.exception.wrap_db_error(session.flush) return session + + +def get_engine(): + """Return a SQLAlchemy engine.""" + 
connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection) + engine_args = { + "pool_recycle": FLAGS.sql_idle_timeout, + "echo": False, + } + + LOG.info(_("SQL connection: %s") % FLAGS.sql_connection) + + if "sqlite" in connection_dict.drivername: + engine_args["poolclass"] = sqlalchemy.pool.NullPool + + if "mysql" in connection_dict.drivername: + LOG.info(_("Using MySQL/eventlet DB connection pool.")) + pool_args = { + "db": connection_dict.database, + "user": connection_dict.username, + "passwd": connection_dict.password, + "host": connection_dict.host, + "min_size": FLAGS.sql_min_pool_size, + "max_size": FLAGS.sql_max_pool_size, + "max_idle": FLAGS.sql_idle_timeout, + "max_age": FLAGS.sql_idle_timeout, + } + creator = eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args) + engine_args["pool_size"] = FLAGS.sql_max_pool_size + engine_args["pool_timeout"] = FLAGS.sql_pool_timeout + engine_args["creator"] = creator.create + + return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) diff --git a/nova/flags.py b/nova/flags.py index eb6366ed9..fa5528c8c 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -345,6 +345,12 @@ DEFINE_string('logdir', None, 'output to a per-service log file in named ' 'directory') DEFINE_integer('logfile_mode', 0644, 'Default file mode of the logs.') DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite') +DEFINE_integer('sql_pool_timeout', 30, + 'seconds to wait for connection from pool before erroring') +DEFINE_integer('sql_min_pool_size', 10, + 'minimum number of SQL connections to pool') +DEFINE_integer('sql_max_pool_size', 10, + 'maximum number of SQL connections to pool') DEFINE_string('sql_connection', 'sqlite:///$state_path/$sqlite_db', 'connection string for 
sql database') -- cgit From 93c4a691c28668d62103b2ae2f90b284950cd95f Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 10 Aug 2011 14:01:18 -0400 Subject: Make sure to not use MySQLdb if you don't have it. --- nova/db/sqlalchemy/session.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 726a801af..6ef2d7b2c 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -38,7 +38,7 @@ LOG = nova.log.getLogger("nova.db.sqlalchemy") try: import MySQLdb except ImportError: - LOG.debug(_("Unable to load MySQLdb module.")) + MySQLdb = None _ENGINE = None @@ -72,8 +72,8 @@ def get_engine(): if "sqlite" in connection_dict.drivername: engine_args["poolclass"] = sqlalchemy.pool.NullPool - if "mysql" in connection_dict.drivername: - LOG.info(_("Using MySQL/eventlet DB connection pool.")) + if MySQLdb and "mysql" in connection_dict.drivername: + LOG.info(_("Using MySQLdb/eventlet DB connection pool.")) pool_args = { "db": connection_dict.database, "user": connection_dict.username, -- cgit From ad3ccef3e86220b480a114bb70eaa9def2abd430 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 10 Aug 2011 14:03:16 -0400 Subject: Removed un-needed log line. 
--- nova/db/sqlalchemy/session.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 6ef2d7b2c..7ebe000e8 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -67,8 +67,6 @@ def get_engine(): "echo": False, } - LOG.info(_("SQL connection: %s") % FLAGS.sql_connection) - if "sqlite" in connection_dict.drivername: engine_args["poolclass"] = sqlalchemy.pool.NullPool -- cgit From 8331fee7695a40824ae1bd24c52b22987b5f3507 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 10 Aug 2011 15:06:04 -0400 Subject: elif and FLAG feedback --- nova/db/sqlalchemy/session.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 7ebe000e8..81cb9fad0 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -70,7 +70,7 @@ def get_engine(): if "sqlite" in connection_dict.drivername: engine_args["poolclass"] = sqlalchemy.pool.NullPool - if MySQLdb and "mysql" in connection_dict.drivername: + elif MySQLdb and "mysql" in connection_dict.drivername: LOG.info(_("Using MySQLdb/eventlet DB connection pool.")) pool_args = { "db": connection_dict.database, @@ -80,7 +80,6 @@ def get_engine(): "min_size": FLAGS.sql_min_pool_size, "max_size": FLAGS.sql_max_pool_size, "max_idle": FLAGS.sql_idle_timeout, - "max_age": FLAGS.sql_idle_timeout, } creator = eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args) engine_args["pool_size"] = FLAGS.sql_max_pool_size -- cgit From 4d2d064e9da37ce72010408bc1aad8ca67708462 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 10 Aug 2011 16:20:31 -0400 Subject: Support for postgresql. 
--- nova/db/sqlalchemy/session.py | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 81cb9fad0..07ca27bab 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -41,6 +41,12 @@ except ImportError: MySQLdb = None +try: + import psycopg2 +except ImportError: + psycopg2 = None + + _ENGINE = None _MAKER = None @@ -62,28 +68,35 @@ def get_session(autocommit=True, expire_on_commit=False): def get_engine(): """Return a SQLAlchemy engine.""" connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection) + engine_args = { "pool_recycle": FLAGS.sql_idle_timeout, + "pool_size": FLAGS.sql_max_pool_size, + "pool_timeout": FLAGS.sql_pool_timeout, "echo": False, } + pool_args = { + "db": connection_dict.database, + "user": connection_dict.username, + "passwd": connection_dict.password, + "host": connection_dict.host, + "min_size": FLAGS.sql_min_pool_size, + "max_size": FLAGS.sql_max_pool_size, + "max_idle": FLAGS.sql_idle_timeout, + } + if "sqlite" in connection_dict.drivername: + del engine_args["pool_size"] + del engine_args["pool_timeout"] engine_args["poolclass"] = sqlalchemy.pool.NullPool elif MySQLdb and "mysql" in connection_dict.drivername: - LOG.info(_("Using MySQLdb/eventlet DB connection pool.")) - pool_args = { - "db": connection_dict.database, - "user": connection_dict.username, - "passwd": connection_dict.password, - "host": connection_dict.host, - "min_size": FLAGS.sql_min_pool_size, - "max_size": FLAGS.sql_max_pool_size, - "max_idle": FLAGS.sql_idle_timeout, - } creator = eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args) - engine_args["pool_size"] = FLAGS.sql_max_pool_size - engine_args["pool_timeout"] = FLAGS.sql_pool_timeout + engine_args["creator"] = creator.create + + elif psycopg2 and "postgresql" in connection_dict.drivername: + creator = eventlet.db_pool.ConnectionPool(psycopg2, **pool_args) 
engine_args["creator"] = creator.create return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) -- cgit From 7d9ff45ec1acc3cba0d58341f3cce011df140f8e Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 11 Aug 2011 12:37:29 +0100 Subject: Remove doublequotes from env variable setting since they are literally passed --- nova/network/linux_net.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 8b7b59f55..4e1e1f85a 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -616,8 +616,8 @@ def update_dhcp(context, network_ref): else: LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) - cmd = ['FLAGFILE="%s"' % FLAGS.dhcpbridge_flagfile, - 'DNSMASQ_INTERFACE="%s"' % network_ref['bridge'], + cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile, + 'DNSMASQ_INTERFACE=%s' % network_ref['bridge'], 'dnsmasq', '--strict-order', '--bind-interfaces', -- cgit From b121cd266d3d5e1719e644d6bd82d6402f13d2e2 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 11 Aug 2011 11:55:02 -0400 Subject: Logging for SQLAlchemy type. 
--- nova/db/sqlalchemy/session.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 07ca27bab..073e4ae49 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -77,10 +77,8 @@ def get_engine(): } pool_args = { - "db": connection_dict.database, - "user": connection_dict.username, - "passwd": connection_dict.password, "host": connection_dict.host, + "user": connection_dict.username, "min_size": FLAGS.sql_min_pool_size, "max_size": FLAGS.sql_max_pool_size, "max_idle": FLAGS.sql_idle_timeout, @@ -92,10 +90,20 @@ def get_engine(): engine_args["poolclass"] = sqlalchemy.pool.NullPool elif MySQLdb and "mysql" in connection_dict.drivername: + LOG.info(_("Using mysql/eventlet db_pool.")) + pool_args.update({ + "db": connection_dict.database, + "passwd": connection_dict.password, + }) creator = eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args) engine_args["creator"] = creator.create elif psycopg2 and "postgresql" in connection_dict.drivername: + LOG.info(_("Using postgresql/eventlet db_pool.")) + pool_args.update({ + "database": connection_dict.database, + "password": connection_dict.password, + }) creator = eventlet.db_pool.ConnectionPool(psycopg2, **pool_args) engine_args["creator"] = creator.create -- cgit From 253d75e71beb1ce9c65e84233a3178f95f82d77d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 11 Aug 2011 12:02:05 -0400 Subject: More logging. 
--- nova/db/sqlalchemy/session.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 073e4ae49..141b7bf37 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -107,6 +107,9 @@ def get_engine(): creator = eventlet.db_pool.ConnectionPool(psycopg2, **pool_args) engine_args["creator"] = creator.create + LOG.debug(_("SQLAlchemy Engine Arguments: %(engine_args)s") % locals()) + LOG.debug(_("Eventlet Pool Arguments: %(pool_args)s") % locals()) + return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) -- cgit From 49da55f7952f8daecf6df9498769b336af95ce6d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 11 Aug 2011 14:27:14 -0400 Subject: Removed postgres, bug in current ubuntu package which won't allow it to work easily. Will add a bug in LP. --- nova/db/sqlalchemy/session.py | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 141b7bf37..ffa3c747c 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -41,12 +41,6 @@ except ImportError: MySQLdb = None -try: - import psycopg2 -except ImportError: - psycopg2 = None - - _ENGINE = None _MAKER = None @@ -76,14 +70,6 @@ def get_engine(): "echo": False, } - pool_args = { - "host": connection_dict.host, - "user": connection_dict.username, - "min_size": FLAGS.sql_min_pool_size, - "max_size": FLAGS.sql_max_pool_size, - "max_idle": FLAGS.sql_idle_timeout, - } - if "sqlite" in connection_dict.drivername: del engine_args["pool_size"] del engine_args["pool_timeout"] @@ -94,22 +80,15 @@ def get_engine(): pool_args.update({ "db": connection_dict.database, "passwd": connection_dict.password, + "host": connection_dict.host, + "user": connection_dict.username, + "min_size": FLAGS.sql_min_pool_size, + "max_size": FLAGS.sql_max_pool_size, + "max_idle": FLAGS.sql_idle_timeout, }) creator = 
eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args) engine_args["creator"] = creator.create - elif psycopg2 and "postgresql" in connection_dict.drivername: - LOG.info(_("Using postgresql/eventlet db_pool.")) - pool_args.update({ - "database": connection_dict.database, - "password": connection_dict.password, - }) - creator = eventlet.db_pool.ConnectionPool(psycopg2, **pool_args) - engine_args["creator"] = creator.create - - LOG.debug(_("SQLAlchemy Engine Arguments: %(engine_args)s") % locals()) - LOG.debug(_("Eventlet Pool Arguments: %(pool_args)s") % locals()) - return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) -- cgit From 3017d3a7cd9cd4928a5e5247054b877e63fac095 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 11 Aug 2011 14:36:29 -0400 Subject: Silly fixes. --- nova/db/sqlalchemy/session.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index ffa3c747c..07f281938 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -65,19 +65,15 @@ def get_engine(): engine_args = { "pool_recycle": FLAGS.sql_idle_timeout, - "pool_size": FLAGS.sql_max_pool_size, - "pool_timeout": FLAGS.sql_pool_timeout, "echo": False, } if "sqlite" in connection_dict.drivername: - del engine_args["pool_size"] - del engine_args["pool_timeout"] engine_args["poolclass"] = sqlalchemy.pool.NullPool elif MySQLdb and "mysql" in connection_dict.drivername: LOG.info(_("Using mysql/eventlet db_pool.")) - pool_args.update({ + pool_args = { "db": connection_dict.database, "passwd": connection_dict.password, "host": connection_dict.host, @@ -85,8 +81,10 @@ def get_engine(): "min_size": FLAGS.sql_min_pool_size, "max_size": FLAGS.sql_max_pool_size, "max_idle": FLAGS.sql_idle_timeout, - }) + } creator = eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args) + engine_args["pool_size"] = FLAGS.sql_max_pool_size + engine_args["pool_timeout"] = FLAGS.sql_pool_timeout 
engine_args["creator"] = creator.create return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) -- cgit From 45d6ab8ffec6ff4b26500df7049ce4092b15f00c Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 11 Aug 2011 15:30:43 -0400 Subject: fixing id parsing --- nova/api/openstack/common.py | 9 ++++++--- nova/tests/api/openstack/test_common.py | 4 ++++ nova/tests/api/openstack/test_servers.py | 16 ++++++++++++++++ 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index dfdd62201..23614d598 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -169,10 +169,13 @@ def get_id_from_href(href): Returns: 123 """ - if re.match(r'\d+$', str(href)): - return int(href) try: - return int(urlparse.urlsplit(href).path.split('/')[-1]) + href = str(href) + + if re.match(r'\d+$', href): + return int(href) + else: + return int(urlparse.urlsplit(href).path.split('/')[-1]) except ValueError, e: LOG.debug(_("Error extracting id from href: %s") % href) raise ValueError(_('could not parse id from href')) diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 5a6e43579..b422bc4d1 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -249,6 +249,10 @@ class MiscFunctionsTest(test.TestCase): common.get_id_from_href, fixture) + def test_get_id_from_href_int(self): + fixture = 1 + self.assertEqual(fixture, common.get_id_from_href(fixture)) + def test_get_version_from_href(self): fixture = 'http://www.testsite.com/v1.1/images' expected = '1.1' diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index b6342ae2f..290f6e990 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1653,6 +1653,22 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) 
self.assertEqual(res.status_int, 400) + def test_create_instance_v1_1_invalid_flavor_id_int(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = -1 + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + metadata={'hello': 'world', 'open': 'stack'}, + personality={})) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_create_instance_v1_1_bad_flavor_href(self): self._setup_for_create_instance() -- cgit From 8f3ad17b4ee25fedeee98132f22cf1eeb5974a2c Mon Sep 17 00:00:00 2001 From: William Wolf Date: Thu, 11 Aug 2011 16:04:12 -0400 Subject: fixed pep8 issue --- nova/tests/api/openstack/contrib/test_keypairs.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_keypairs.py b/nova/tests/api/openstack/contrib/test_keypairs.py index c9dc34d65..23f19157e 100644 --- a/nova/tests/api/openstack/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/contrib/test_keypairs.py @@ -77,8 +77,21 @@ class KeypairsTest(test.TestCase): self.assertTrue(len(res_dict['keypair']['private_key']) > 0) def test_keypair_import(self): - body = {'keypair': {'name': 'create_test', - 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznAx9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymiMZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7ljj5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwBbHkXa6OciiJDvkRzJXzf'}} + body = { + 'keypair': { + 'name': 'create_test', + 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA' + 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4' + 
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y' + 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi' + 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj' + 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1' + 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc' + 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB' + 'bHkXa6OciiJDvkRzJXzf', + }, + } + req = webob.Request.blank('/v1.1/os-keypairs') req.method = 'POST' req.body = json.dumps(body) -- cgit From 03cf6551feae597dd71fbf7b52b41415863d1241 Mon Sep 17 00:00:00 2001 From: William Wolf Date: Thu, 11 Aug 2011 16:05:33 -0400 Subject: spacing fixes --- nova/tests/api/openstack/contrib/test_keypairs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/contrib/test_keypairs.py b/nova/tests/api/openstack/contrib/test_keypairs.py index 23f19157e..eb3bc7af0 100644 --- a/nova/tests/api/openstack/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/contrib/test_keypairs.py @@ -28,6 +28,7 @@ def fake_keypair(name): 'fingerprint': 'FAKE_FINGERPRINT', 'name': name} + def db_key_pair_get_all_by_user(self, user_id): return [fake_keypair('FAKE')] @@ -109,4 +110,3 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) - -- cgit From 68161e3e224ff77e4e93d02e5fabbd9ea17b0d48 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Thu, 11 Aug 2011 17:04:33 -0700 Subject: prefixed with os- for the newly added extensions --- nova/api/openstack/contrib/security_groups.py | 4 ++-- .../api/openstack/contrib/test_security_groups.py | 22 +++++++++++----------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/nova/api/openstack/contrib/security_groups.py b/nova/api/openstack/contrib/security_groups.py index 1d70956dc..6c57fbb51 100644 --- a/nova/api/openstack/contrib/security_groups.py +++ b/nova/api/openstack/contrib/security_groups.py @@ -366,7 +366,7 @@ class 
Security_groups(extensions.ExtensionDescriptor): } deserializer = wsgi.RequestDeserializer(body_deserializers) - res = extensions.ResourceExtension('security_groups', + res = extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController(), deserializer=deserializer, serializer=serializer) @@ -378,7 +378,7 @@ class Security_groups(extensions.ExtensionDescriptor): } deserializer = wsgi.RequestDeserializer(body_deserializers) - res = extensions.ResourceExtension('security_group_rules', + res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController(), deserializer=deserializer, serializer=serializer) diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py index 9a86a60f8..4317880ca 100644 --- a/nova/tests/api/openstack/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -25,7 +25,7 @@ from nova.tests.api.openstack import fakes def _get_create_request_json(body_dict): - req = webob.Request.blank('/v1.1/security_groups') + req = webob.Request.blank('/v1.1/os-security-groups') req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body_dict) @@ -84,7 +84,7 @@ class TestSecurityGroups(test.TestCase): return ''.join(body_parts) def _get_create_request_xml(self, body_dict): - req = webob.Request.blank('/v1.1/security_groups') + req = webob.Request.blank('/v1.1/os-security-groups') req.headers['Content-Type'] = 'application/xml' req.content_type = 'application/xml' req.accept = 'application/xml' @@ -99,7 +99,7 @@ class TestSecurityGroups(test.TestCase): return response def _delete_security_group(self, id): - request = webob.Request.blank('/v1.1/security_groups/%s' + request = webob.Request.blank('/v1.1/os-security-groups/%s' % id) request.method = 'DELETE' response = request.get_response(fakes.wsgi_app()) @@ -238,7 +238,7 @@ class 
TestSecurityGroups(test.TestCase): security_group['description'] = "group-description" response = _create_security_group_json(security_group) - req = webob.Request.blank('/v1.1/security_groups') + req = webob.Request.blank('/v1.1/os-security-groups') req.headers['Content-Type'] = 'application/json' req.method = 'GET' response = req.get_response(fakes.wsgi_app()) @@ -272,7 +272,7 @@ class TestSecurityGroups(test.TestCase): response = _create_security_group_json(security_group) res_dict = json.loads(response.body) - req = webob.Request.blank('/v1.1/security_groups/%s' % + req = webob.Request.blank('/v1.1/os-security-groups/%s' % res_dict['security_group']['id']) req.headers['Content-Type'] = 'application/json' req.method = 'GET' @@ -292,14 +292,14 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(res_dict, expected) def test_get_security_group_by_invalid_id(self): - req = webob.Request.blank('/v1.1/security_groups/invalid') + req = webob.Request.blank('/v1.1/os-security-groups/invalid') req.headers['Content-Type'] = 'application/json' req.method = 'GET' response = req.get_response(fakes.wsgi_app()) self.assertEquals(response.status_int, 400) def test_get_security_group_by_non_existing_id(self): - req = webob.Request.blank('/v1.1/security_groups/111111111') + req = webob.Request.blank('/v1.1/os-security-groups/111111111') req.headers['Content-Type'] = 'application/json' req.method = 'GET' response = req.get_response(fakes.wsgi_app()) @@ -354,7 +354,7 @@ class TestSecurityGroupRules(test.TestCase): super(TestSecurityGroupRules, self).tearDown() def _create_security_group_rule_json(self, rules): - request = webob.Request.blank('/v1.1/security_group_rules') + request = webob.Request.blank('/v1.1/os-security-group-rules') request.headers['Content-Type'] = 'application/json' request.method = 'POST' request.body = json.dumps(rules) @@ -362,7 +362,7 @@ class TestSecurityGroupRules(test.TestCase): return response def _delete_security_group_rule(self, id): - 
request = webob.Request.blank('/v1.1/security_group_rules/%s' + request = webob.Request.blank('/v1.1/os-security-group-rules/%s' % id) request.method = 'DELETE' response = request.get_response(fakes.wsgi_app()) @@ -420,7 +420,7 @@ class TestSecurityGroupRules(test.TestCase): self.assertEquals(response.status_int, 400) def test_create_with_no_body_json(self): - request = webob.Request.blank('/v1.1/security_group_rules') + request = webob.Request.blank('/v1.1/os-security-group-rules') request.headers['Content-Type'] = 'application/json' request.method = 'POST' request.body = json.dumps(None) @@ -428,7 +428,7 @@ class TestSecurityGroupRules(test.TestCase): self.assertEquals(response.status_int, 422) def test_create_with_no_security_group_rule_in_body_json(self): - request = webob.Request.blank('/v1.1/security_group_rules') + request = webob.Request.blank('/v1.1/os-security-group-rules') request.headers['Content-Type'] = 'application/json' request.method = 'POST' body_dict = {'test': "test"} -- cgit From c398d6cf85dcb30b8cd499f410618b88a0b5c8c9 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 11 Aug 2011 22:02:21 -0700 Subject: join virtual_interfaces.instance for DB queries for instances. updates instance_get_all to match instance_get_all_by_filters. 
--- nova/db/sqlalchemy/api.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8119cdfb8..e5d35a20b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1139,7 +1139,10 @@ def instance_get_all(context): session = get_session() return session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ + options(joinedload_all('virtual_interfaces.network')).\ + options(joinedload_all( + 'virtual_interfaces.fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces.instance')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ @@ -1202,6 +1205,7 @@ def instance_get_all_by_filters(context, filters): options(joinedload_all('virtual_interfaces.network')).\ options(joinedload_all( 'virtual_interfaces.fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces.instance')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ -- cgit From f95e0118d91a8f77345e4d78980e2523cb4dba56 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 12 Aug 2011 10:59:10 -0400 Subject: Fixes to the OSAPI floating API extension DELETE. Updated to use correct args for self.disassociate (don't sweep exceptions which should cause test cases to fail under the rug). Additionally updated to pass network_api.release_floating_ip the address instead of a dict. 
--- nova/api/openstack/contrib/floating_ips.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 2aba1068a..c07bfdf09 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -104,12 +104,9 @@ class FloatingIPController(object): ip = self.network_api.get_floating_ip(context, id) if 'fixed_ip' in ip: - try: - self.disassociate(req, id, '') - except Exception as e: - LOG.exception(_("Error disassociating fixed_ip %s"), e) + self.disassociate(req, id) - self.network_api.release_floating_ip(context, address=ip) + self.network_api.release_floating_ip(context, address=ip['address']) return {'released': { "id": ip['id'], -- cgit From 15271a08de44da1813bfb2a2b68a2f28ef887c21 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 12 Aug 2011 08:43:42 -0700 Subject: fix typo that causes AMI instances to launch with a kernel as ramdisk --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b9cd59946..b1522729a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -186,7 +186,7 @@ class VMOps(object): instance.project_id, ImageType.KERNEL)[0] if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(context, self._session, - instance.id, instance.kernel_id, instance.user_id, + instance.id, instance.ramdisk_id, instance.user_id, instance.project_id, ImageType.RAMDISK)[0] # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', -- cgit From 954e8e24c6b8ceb541c539ce7c26da4b35b5f0b1 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 12 Aug 2011 11:44:49 -0400 Subject: rewriting parsing --- nova/api/openstack/common.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/nova/api/openstack/common.py 
b/nova/api/openstack/common.py index 23614d598..b2a675653 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -169,16 +169,20 @@ def get_id_from_href(href): Returns: 123 """ + LOG.debug(_("Attempting to treat %(href)s as an integer ID.") % locals()) + + try: + return int(href) + except ValueError: + pass + + LOG.debug(_("Attempting to treat %(href)s as a URL.") % locals()) + try: - href = str(href) - - if re.match(r'\d+$', href): - return int(href) - else: - return int(urlparse.urlsplit(href).path.split('/')[-1]) - except ValueError, e: - LOG.debug(_("Error extracting id from href: %s") % href) - raise ValueError(_('could not parse id from href')) + return int(urlparse.urlsplit(href).path.split('/')[-1]) + except ValueError as error: + LOG.debug(_("Failed to parse ID from %(href)s: %(error)s") % locals()) + raise def remove_version_from_href(href): -- cgit