-rw-r--r-- | nova/api/ec2/cloud.py | 14
-rwxr-xr-x | nova/compute/manager.py | 7
-rw-r--r-- | nova/db/sqlalchemy/api.py | 7
-rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py | 2
-rw-r--r-- | nova/db/sqlalchemy/models.py | 29
-rw-r--r-- | nova/network/security_group/quantum_driver.py | 10
-rw-r--r-- | nova/tests/api/ec2/test_cloud.py | 66
-rw-r--r-- | nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py | 19
-rw-r--r-- | nova/tests/compute/test_compute.py | 35
-rw-r--r-- | nova/tests/test_migrations.py | 58
-rw-r--r-- | nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py | 2
-rw-r--r-- | nova/virt/disk/vfs/guestfs.py | 3
-rwxr-xr-x | tools/hacking.py | 6
13 files changed, 190 insertions, 68 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index dcbde3428..60b06c233 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -503,9 +503,17 @@ class CloudController(object):
             r['groups'] = []
             r['ipRanges'] = []
             if rule['group_id']:
-                source_group = rule['grantee_group']
-                r['groups'] += [{'groupName': source_group['name'],
-                                 'userId': source_group['project_id']}]
+                if rule.get('grantee_group'):
+                    source_group = rule['grantee_group']
+                    r['groups'] += [{'groupName': source_group['name'],
+                                     'userId': source_group['project_id']}]
+                else:
+                    # rule is not always joined with grantee_group
+                    # for example when using quantum driver.
+                    source_group = self.security_group_api.get(
+                        context, id=rule['group_id'])
+                    r['groups'] += [{'groupName': source_group.get('name'),
+                                     'userId': source_group.get('project_id')}]
             if rule['protocol']:
                 r['ipProtocol'] = rule['protocol'].lower()
                 r['fromPort'] = rule['from_port']
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 99b97e921..9ca00bd9b 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -780,6 +780,13 @@ class ComputeManager(manager.SchedulerDependentManager):
                 msg = _('Failed to dealloc network for deleted instance')
                 LOG.exception(msg, instance=instance)
                 raise
+            except exception.UnexpectedTaskStateError as e:
+                actual_task_state = e.kwargs.get('actual', None)
+                if actual_task_state == 'deleting':
+                    msg = _('Instance was deleted during spawn.')
+                    LOG.debug(msg, instance=instance)
+                else:
+                    raise
             except Exception:
                 exc_info = sys.exc_info()
                 # try to re-schedule instance:
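
The new except branch in manager.py keys off the task state carried in the exception's kwargs. A minimal, self-contained sketch of that pattern — the class and function names below are illustrative stand-ins, not Nova's actual code:

    # Swallow the error only when the instance was already being deleted;
    # otherwise re-raise so the normal reschedule path runs.


    class UnexpectedTaskStateError(Exception):
        def __init__(self, **kwargs):
            super(UnexpectedTaskStateError, self).__init__(str(kwargs))
            self.kwargs = kwargs


    def handle_spawn_error(exc):
        if exc.kwargs.get('actual', None) == 'deleting':
            return 'instance deleted during spawn; nothing to reschedule'
        raise exc


    print(handle_spawn_error(
        UnexpectedTaskStateError(expected='spawning', actual='deleting')))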
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index f26d67cac..b888d2628 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -4228,7 +4228,7 @@ def aggregate_create(context, values, metadata=None):
                                  values['name'],
                                  session=session,
                                  read_deleted='no')
-    aggregate = query.options(joinedload('_metadata')).first()
+    aggregate = query.first()
     if not aggregate:
         aggregate = models.Aggregate()
         aggregate.update(values)
@@ -4250,7 +4250,7 @@ def aggregate_get(context, aggregate_id):
                                  models.Aggregate,
                                  models.Aggregate.id,
                                  aggregate_id)
-    aggregate = query.options(joinedload('_metadata')).first()
+    aggregate = query.first()

     if not aggregate:
         raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@@ -4304,8 +4304,7 @@ def aggregate_update(context, aggregate_id, values):
                                  models.Aggregate,
                                  models.Aggregate.id,
                                  aggregate_id,
-                                 session=session).
-                     options(joinedload('_metadata')).first())
+                                 session=session).first())

     if aggregate:
         if "availability_zone" in values:
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py b/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py
index 8a97959bc..1b688a883 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py
@@ -1222,4 +1222,4 @@ def upgrade(migrate_engine):


 def downgrade(migrate_engine):
-    LOG.exception(_('Downgrade from Folsom is unsupported.'))
+    raise NotImplementedError('Downgrade from Folsom is unsupported.')
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 95456bf98..a675357df 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -164,12 +164,6 @@ class Instance(BASE, NovaBase):
     ramdisk_id = Column(String(255))
     hostname = Column(String(255))

-#    image_ref = Column(Integer, ForeignKey('images.id'), nullable=True)
-#    kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
-#    ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
-#    ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id))
-#    kernel = relationship(Kernel, backref=backref('instances', order_by=id))
-
     launch_index = Column(Integer)
     key_name = Column(String(255))
     key_data = Column(Text)
@@ -841,29 +835,16 @@ class Aggregate(BASE, NovaBase):
     id = Column(Integer, primary_key=True, autoincrement=True)
     name = Column(String(255))
     _hosts = relationship(AggregateHost,
-                          lazy="joined",
-                          secondary="aggregate_hosts",
                           primaryjoin='and_('
-                          'Aggregate.id == AggregateHost.aggregate_id,'
-                          'AggregateHost.deleted == 0,'
-                          'Aggregate.deleted == 0)',
-                          secondaryjoin='and_('
-                          'AggregateHost.aggregate_id == Aggregate.id, '
-                          'AggregateHost.deleted == 0,'
-                          'Aggregate.deleted == 0)',
-                          backref='aggregates')
+                          'Aggregate.id == AggregateHost.aggregate_id,'
+                          'AggregateHost.deleted == 0,'
+                          'Aggregate.deleted == 0)')

     _metadata = relationship(AggregateMetadata,
-                             secondary="aggregate_metadata",
-                             primaryjoin='and_('
+                             primaryjoin='and_('
                              'Aggregate.id == AggregateMetadata.aggregate_id,'
                              'AggregateMetadata.deleted == 0,'
-                             'Aggregate.deleted == 0)',
-                             secondaryjoin='and_('
-                             'AggregateMetadata.aggregate_id == Aggregate.id, '
-                             'AggregateMetadata.deleted == 0,'
-                             'Aggregate.deleted == 0)',
-                             backref='aggregates')
+                             'Aggregate.deleted == 0)')

     def _extra_keys(self):
         return ['hosts', 'metadetails', 'availability_zone']
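
The api.py hunks drop the explicit joinedload('_metadata') option and the models.py hunks drop the secondary-join machinery, so aggregate metadata is now lazy-loaded on first attribute access. A hedged, self-contained SQLAlchemy sketch of that loading behavior, using invented toy models much simpler than Nova's schema:

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship, sessionmaker

    Base = declarative_base()


    class Aggregate(Base):
        __tablename__ = 'aggregates'
        id = Column(Integer, primary_key=True)
        _metadata = relationship('AggregateMetadata')


    class AggregateMetadata(Base):
        __tablename__ = 'aggregate_metadata'
        id = Column(Integer, primary_key=True)
        aggregate_id = Column(Integer, ForeignKey('aggregates.id'))
        key = Column(String(255))


    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Aggregate(id=1, _metadata=[AggregateMetadata(key='az')]))
    session.commit()

    # Without a joinedload() option the first query emits no JOIN;
    # touching agg._metadata afterwards fires a second, lazy SELECT.
    agg = session.query(Aggregate).first()
    print(agg._metadata[0].key)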
diff --git a/nova/network/security_group/quantum_driver.py b/nova/network/security_group/quantum_driver.py
index 918c839e9..623f2f4ed 100644
--- a/nova/network/security_group/quantum_driver.py
+++ b/nova/network/security_group/quantum_driver.py
@@ -97,6 +97,9 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
     def get(self, context, name=None, id=None, map_exception=False):
         quantum = quantumv2.get_client(context)
         try:
+            if not id and name:
+                id = quantumv20.find_resourceid_by_name_or_id(
+                    quantum, 'security_group', name)
             group = quantum.show_security_group(id).get('security_group')
         except q_exc.QuantumClientException as e:
             if e.status_code == 404:
@@ -113,8 +116,13 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                  search_opts=None):
         """Returns list of security group rules owned by tenant."""
         quantum = quantumv2.get_client(context)
+        search_opts = {}
+        if names:
+            search_opts['name'] = names
+        if ids:
+            search_opts['id'] = ids
         try:
-            security_groups = quantum.list_security_groups().get(
+            security_groups = quantum.list_security_groups(**search_opts).get(
                 'security_groups')
         except q_exc.QuantumClientException as e:
             LOG.exception(_("Quantum Error getting security groups"))
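
With the list() change above, name and id filters become query parameters on the client call instead of being applied in Python after fetching everything. A rough sketch of that plumbing — FakeClient is an invented stand-in for the real client returned by quantumv2.get_client:

    class FakeClient(object):
        def list_security_groups(self, **search_opts):
            groups = [{'id': '1', 'name': 'default'},
                      {'id': '2', 'name': 'web'}]
            # Apply each requested filter field server-side, as the real
            # quantum service would.
            for field, wanted in search_opts.items():
                groups = [g for g in groups if g[field] in wanted]
            return {'security_groups': groups}


    def list_groups(client, names=None, ids=None):
        search_opts = {}
        if names:
            search_opts['name'] = names
        if ids:
            search_opts['id'] = ids
        return client.list_security_groups(**search_opts)['security_groups']


    print(list_groups(FakeClient(), names=['web']))  # [{'id': '2', 'name': 'web'}]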
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 07780eb02..b8a4712c4 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -41,9 +41,12 @@ from nova import db
 from nova import exception
 from nova.image import s3
 from nova.network import api as network_api
+from nova.network import quantumv2
 from nova.openstack.common import log as logging
 from nova.openstack.common import rpc
 from nova import test
+from nova.tests.api.openstack.compute.contrib import (
+    test_quantum_security_groups as test_quantum)
 from nova.tests import fake_network
 from nova.tests.image import fake
 from nova.tests import matchers
@@ -2226,3 +2229,66 @@ class CloudTestCase(test.TestCase):
         test_dia_iisb('stop', image_id='ami-4')
         test_dia_iisb('stop', image_id='ami-5')
         test_dia_iisb('stop', image_id='ami-6')
+
+
+class CloudTestCaseQuantumProxy(test.TestCase):
+    def setUp(self):
+        cfg.CONF.set_override('security_group_api', 'quantum')
+        self.cloud = cloud.CloudController()
+        self.original_client = quantumv2.get_client
+        quantumv2.get_client = test_quantum.get_client
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+        self.context = context.RequestContext(self.user_id,
+                                              self.project_id,
+                                              is_admin=True)
+        super(CloudTestCaseQuantumProxy, self).setUp()
+
+    def tearDown(self):
+        quantumv2.get_client = self.original_client
+        test_quantum.get_client()._reset()
+        super(CloudTestCaseQuantumProxy, self).tearDown()
+
+    def test_describe_security_groups(self):
+        # Makes sure describe_security_groups works and filters results.
+        group_name = 'test'
+        description = 'test'
+        self.cloud.create_security_group(self.context, group_name,
+                                         description)
+        result = self.cloud.describe_security_groups(self.context)
+        # NOTE(vish): should have the default group as well
+        self.assertEqual(len(result['securityGroupInfo']), 2)
+        result = self.cloud.describe_security_groups(self.context,
+                      group_name=[group_name])
+        self.assertEqual(len(result['securityGroupInfo']), 1)
+        self.assertEqual(result['securityGroupInfo'][0]['groupName'],
+                         group_name)
+        self.cloud.delete_security_group(self.context, group_name)
+
+    def test_describe_security_groups_by_id(self):
+        group_name = 'test'
+        description = 'test'
+        self.cloud.create_security_group(self.context, group_name,
+                                         description)
+        quantum = test_quantum.get_client()
+        # Get id from quantum since cloud.create_security_group
+        # does not expose it.
+        search_opts = {'name': group_name}
+        groups = quantum.list_security_groups(
+            **search_opts)['security_groups']
+        result = self.cloud.describe_security_groups(self.context,
+                      group_id=[groups[0]['id']])
+        self.assertEqual(len(result['securityGroupInfo']), 1)
+        self.assertEqual(
+            result['securityGroupInfo'][0]['groupName'],
+            group_name)
+        self.cloud.delete_security_group(self.context, group_name)
+
+    def test_create_delete_security_group(self):
+        descript = 'test description'
+        create = self.cloud.create_security_group
+        result = create(self.context, 'testgrp', descript)
+        group_descript = result['securityGroupSet'][0]['groupDescription']
+        self.assertEqual(descript, group_descript)
+        delete = self.cloud.delete_security_group
+        self.assertTrue(delete(self.context, 'testgrp'))
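
For orientation, the EC2-style response shapes these tests assert against, reconstructed from the assertions themselves — the field values here are illustrative, not captured API output:

    describe_result = {'securityGroupInfo': [
        {'groupName': 'default', 'groupDescription': 'default'},
        {'groupName': 'test', 'groupDescription': 'test'},
    ]}
    create_result = {'securityGroupSet': [
        {'groupName': 'testgrp', 'groupDescription': 'test description'},
    ]}

    assert len(describe_result['securityGroupInfo']) == 2
    assert (create_result['securityGroupSet'][0]['groupDescription'] ==
            'test description')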
diff --git a/nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py
index e32fadbb8..5f9c5cefa 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py
@@ -569,10 +569,21 @@ class MockClient(object):
     def list_security_groups(self, **_params):
         ret = []
         for security_group in self._fake_security_groups.values():
-            if _params.get('name'):
-                if security_group.get('name') == _params['name']:
-                    ret.append(security_group)
-            else:
+            names = _params.get('name')
+            if names:
+                if not isinstance(names, list):
+                    names = [names]
+                for name in names:
+                    if security_group.get('name') == name:
+                        ret.append(security_group)
+            ids = _params.get('id')
+            if ids:
+                if not isinstance(ids, list):
+                    ids = [ids]
+                for id in ids:
+                    if security_group.get('id') == id:
+                        ret.append(security_group)
+            elif not (names or ids):
                 ret.append(security_group)
         return {'security_groups': ret}

diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 5ad333c9e..98eba5570 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -7225,6 +7225,41 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
             self.compute._reschedule_or_reraise(self.context, self.instance,
                     exc_info, None, None, None, False, None, {})
+
+    def test_no_reschedule_on_delete_during_spawn(self):
+        # instance should not be rescheduled if instance is deleted
+        # during the build
+        self.mox.StubOutWithMock(self.compute, '_spawn')
+        self.mox.StubOutWithMock(self.compute, '_reschedule_or_reraise')
+
+        exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
+                actual=task_states.DELETING)
+        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+                mox.IgnoreArg()).AndRaise(exc)
+
+        self.mox.ReplayAll()
+        # test succeeds if mocked method '_reschedule_or_reraise' is not
+        # called.
+        self.compute._run_instance(self.context, None, {}, None, None, None,
+                False, None, self.instance)
+
+    def test_no_reschedule_on_unexpected_task_state(self):
+        # instance shouldn't be rescheduled if unexpected task state arises.
+        # the exception should get reraised.
+        self.mox.StubOutWithMock(self.compute, '_spawn')
+        self.mox.StubOutWithMock(self.compute, '_reschedule_or_reraise')
+
+        exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
+                actual=task_states.SCHEDULING)
+        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+                mox.IgnoreArg()).AndRaise(exc)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.UnexpectedTaskStateError,
+            self.compute._run_instance, self.context, None, {}, None, None,
+            None, False, None, self.instance)


 class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
     """Test logic and exception handling around rescheduling prep resize
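
The MockClient change accepts each filter as either a scalar or a list, mirroring how the quantum client passes query parameters. The same normalization in isolation, as a tiny self-contained helper (an illustrative sketch, not code from the tree):

    def as_list(value):
        # Treat None as "no filter", wrap a bare scalar, pass lists through.
        if value is None:
            return []
        if not isinstance(value, list):
            return [value]
        return value


    print(as_list('web'))          # ['web']
    print(as_list(['web', 'db']))  # ['web', 'db']
    print(as_list(None))           # []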
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index ebccb84df..beb03b0aa 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -366,36 +366,37 @@ class BaseMigrationTestCase(test.TestCase):
             self.migration_api.db_version(engine,
                                           self.REPOSITORY))

-        self.migration_api.upgrade(engine,
-                                   self.REPOSITORY,
-                                   self.INIT_VERSION + 1)
         LOG.debug('latest version is %s' % self.REPOSITORY.latest)
+        versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)

-        for version in xrange(self.INIT_VERSION + 2,
-                              self.REPOSITORY.latest + 1):
+        for version in versions:
             # upgrade -> downgrade -> upgrade
             self._migrate_up(engine, version, with_data=True)
             if snake_walk:
-                self._migrate_down(engine, version - 1)
-                self._migrate_up(engine, version)
+                downgraded = self._migrate_down(
+                    engine, version - 1, with_data=True)
+                if downgraded:
+                    self._migrate_up(engine, version)

         if downgrade:
             # Now walk it back down to 0 from the latest, testing
             # the downgrade paths.
-            for version in reversed(
-                    xrange(self.INIT_VERSION + 2,
-                           self.REPOSITORY.latest + 1)):
+            for version in reversed(versions):
                 # downgrade -> upgrade -> downgrade
-                self._migrate_down(engine, version - 1)
-                if snake_walk:
+                downgraded = self._migrate_down(engine, version - 1)
+
+                if snake_walk and downgraded:
                     self._migrate_up(engine, version)
                     self._migrate_down(engine, version - 1)

-    def _migrate_down(self, engine, version):
-        self.migration_api.downgrade(engine,
-                                     self.REPOSITORY,
-                                     version)
-
+    def _migrate_down(self, engine, version, with_data=False):
+        try:
+            self.migration_api.downgrade(engine, self.REPOSITORY, version)
+        except NotImplementedError:
+            # NOTE(sirp): some migrations, namely release-level
+            # migrations, don't support a downgrade.
+            return False
+
         self.assertEqual(version,
                          self.migration_api.db_version(engine,
                                                        self.REPOSITORY))
@@ -403,10 +404,13 @@ class BaseMigrationTestCase(test.TestCase):
         # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
         # version). So if we have any downgrade checks, they need to be run for
         # the previous (higher numbered) migration.
-        post_downgrade = getattr(
-            self, "_post_downgrade_%03d" % (version + 1), None)
-        if post_downgrade:
-            post_downgrade(engine)
+        if with_data:
+            post_downgrade = getattr(
+                self, "_post_downgrade_%03d" % (version + 1), None)
+            if post_downgrade:
+                post_downgrade(engine)
+
+        return True

     def _migrate_up(self, engine, version, with_data=False):
         """migrate up to a new version of the db.
@@ -425,14 +429,10 @@ class BaseMigrationTestCase(test.TestCase):
         if pre_upgrade:
             data = pre_upgrade(engine)

-        self.migration_api.upgrade(engine,
-                                   self.REPOSITORY,
-                                   version)
-        self.assertEqual(
-            version,
-            self.migration_api.db_version(engine,
-                                          self.REPOSITORY))
-
+        self.migration_api.upgrade(engine, self.REPOSITORY, version)
+        self.assertEqual(version,
+                         self.migration_api.db_version(engine,
+                                                       self.REPOSITORY))
         if with_data:
             check = getattr(self, "_check_%03d" % version, None)
             if check:
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
index 2d899406c..fad83e21b 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
@@ -120,4 +120,4 @@ def upgrade(migrate_engine):


 def downgrade(migrate_engine):
-    pass
+    raise NotImplementedError('Downgrade from 001_init is unsupported.')
diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py
index 11cf9d098..985858a0d 100644
--- a/nova/virt/disk/vfs/guestfs.py
+++ b/nova/virt/disk/vfs/guestfs.py
@@ -14,6 +14,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.

+from eventlet import tpool
 import guestfs

 from nova import exception
@@ -93,7 +94,7 @@ class VFSGuestFS(vfs.VFS):
     def setup(self):
         LOG.debug(_("Setting up appliance for %(imgfile)s %(imgfmt)s") %
                   {'imgfile': self.imgfile, 'imgfmt': self.imgfmt})
-        self.handle = guestfs.GuestFS()
+        self.handle = tpool.Proxy(guestfs.GuestFS())

         try:
             self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
diff --git a/tools/hacking.py b/tools/hacking.py
index dbb4fbe79..03e46d738 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -29,6 +29,7 @@ import re
 import subprocess
 import sys
 import tokenize
+import traceback

 import pep8

@@ -210,6 +211,11 @@ def nova_import_rules(logical_line):
                     _missingImport.add(missing)
                     return True
                 return False
+            except Exception, exc:
+                # NOTE(jogo) don't stack trace if unexpected import error,
+                # log and continue.
+                traceback.print_exc()
+                return False
             return True

         def is_module(mod):
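
The guestfs.py change wraps the handle in eventlet's tpool.Proxy so blocking libguestfs calls run in an OS-thread pool instead of stalling the greenthread hub. A hedged, runnable sketch of what that buys — SlowHandle is an invented stand-in for guestfs.GuestFS:

    import time

    import eventlet
    from eventlet import tpool


    class SlowHandle(object):
        # Stand-in for guestfs.GuestFS; launch() blocks like a
        # long-running C library call.
        def launch(self):
            time.sleep(0.5)
            return 'ready'


    def heartbeat():
        for _ in range(5):
            eventlet.sleep(0.1)
            print('hub still responsive')


    gt = eventlet.spawn(heartbeat)
    handle = tpool.Proxy(SlowHandle())
    # The proxied call executes in a worker thread, so the heartbeat
    # greenthread keeps getting scheduled while launch() blocks.
    print(handle.launch())
    gt.wait()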