diff options
author | gtt116 <gtt116@126.com> | 2013-03-13 03:43:16 +0000 |
---|---|---|
committer | gtt116 <gtt116@126.com> | 2013-03-13 12:23:29 +0000 |
commit | 22e8e37e45c69136b252a494d8398b0b98e35b9e (patch) | |
tree | d012b32722bd84a234fdcd82fc28e6815a07cad6 | |
parent | 99155de44615dbb5718bf7d20bf749ef49a4b507 (diff) | |
download | nova-22e8e37e45c69136b252a494d8398b0b98e35b9e.tar.gz nova-22e8e37e45c69136b252a494d8398b0b98e35b9e.tar.xz nova-22e8e37e45c69136b252a494d8398b0b98e35b9e.zip |
Clean up resources before rescheduling.
Fix bug: #1153964
If an exception is raised while spawning an instance, the instance will be
rescheduled to another host. But only deallocating the network before
rescheduling is not enough: resources such as security group filters and
volumes will remain on the old host.
So shut down the instance and clean up its volumes, instead of just
deallocating the network, before rescheduling.
Change-Id: Ieb6220ae66f0b97a92c50f6998f456968b516fcb
-rwxr-xr-x | nova/compute/manager.py | 15 | ||||
-rw-r--r-- | nova/tests/compute/test_compute.py | 42 |
2 files changed, 37 insertions, 20 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e19059542..4df4a7c92 100755 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -810,7 +810,7 @@ class ComputeManager(manager.SchedulerDependentManager): # try to re-schedule instance: self._reschedule_or_reraise(context, instance, exc_info, requested_networks, admin_password, injected_files, - is_first_time, request_spec, filter_properties) + is_first_time, request_spec, filter_properties, bdms) else: # Spawn success: self._notify_about_instance_usage(context, instance, @@ -828,7 +828,7 @@ class ComputeManager(manager.SchedulerDependentManager): def _reschedule_or_reraise(self, context, instance, exc_info, requested_networks, admin_password, injected_files, is_first_time, - request_spec, filter_properties): + request_spec, filter_properties, bdms=None): """Try to re-schedule the build or re-raise the original build error to error out the instance. """ @@ -839,9 +839,16 @@ class ComputeManager(manager.SchedulerDependentManager): instance, exc_info[1], exc_info=exc_info) try: - self._deallocate_network(context, instance) + LOG.debug(_("Clean up resource before rescheduling."), + instance=instance) + if bdms is None: + capi = self.conductor_api + bdms = capi.block_device_mapping_get_all_by_instance(context, + instance) + self._shutdown_instance(context, instance, bdms) + self._cleanup_volumes(context, instance['uuid'], bdms) except Exception: - # do not attempt retry if network de-allocation failed: + # do not attempt retry if clean up failed: with excutils.save_and_reraise_exception(): self._log_original_error(exc_info, instance_uuid) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index f8c0e86f2..bfad3cea7 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -7290,18 +7290,18 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): self.compute._spawn(mox.IgnoreArg(), self.instance, None, None, None, False, 
None).AndRaise(test.TestingException("BuildError")) self.compute._reschedule_or_reraise(mox.IgnoreArg(), self.instance, - mox.IgnoreArg(), None, None, None, False, None, {}) + mox.IgnoreArg(), None, None, None, False, None, {}, []) self.mox.ReplayAll() self.compute._run_instance(self.context, None, {}, None, None, None, False, None, self.instance) - def test_deallocate_network_fail(self): - """Test de-allocation of network failing before re-scheduling logic - can even run. + def test_shutdown_instance_fail(self): + """Test shutdown instance failing before re-scheduling logic can even + run. """ instance_uuid = self.instance['uuid'] - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') try: raise test.TestingException("Original") @@ -7311,8 +7311,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): compute_utils.add_instance_fault_from_exc(self.context, self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) - self.compute._deallocate_network(self.context, - self.instance).AndRaise(InnerTestingException("Error")) + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()).AndRaise(InnerTestingException("Error")) self.compute._log_original_error(exc_info, instance_uuid) self.mox.ReplayAll() @@ -7327,11 +7327,14 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): # Test handling of exception from _reschedule. 
instance_uuid = self.instance['uuid'] method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_reschedule') - self.compute._deallocate_network(self.context, - self.instance) + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()) + self.compute._cleanup_volumes(self.context, instance_uuid, + mox.IgnoreArg()) self.compute._reschedule(self.context, None, instance_uuid, {}, self.compute.scheduler_rpcapi.run_instance, method_args, task_states.SCHEDULING).AndRaise( @@ -7352,7 +7355,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): # Test not-rescheduling, but no nested exception. instance_uuid = self.instance['uuid'] method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_reschedule') try: @@ -7362,8 +7366,11 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): compute_utils.add_instance_fault_from_exc(self.context, self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) - self.compute._deallocate_network(self.context, - self.instance) + + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()) + self.compute._cleanup_volumes(self.context, instance_uuid, + mox.IgnoreArg()) self.compute._reschedule(self.context, None, {}, instance_uuid, self.compute.scheduler_rpcapi.run_instance, method_args, task_states.SCHEDULING, exc_info).AndReturn(False) @@ -7380,7 +7387,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): # Test behavior when re-scheduling happens. 
instance_uuid = self.instance['uuid'] method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_reschedule') try: @@ -7391,8 +7399,10 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): compute_utils.add_instance_fault_from_exc(self.context, self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) - self.compute._deallocate_network(self.context, - self.instance) + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()) + self.compute._cleanup_volumes(self.context, instance_uuid, + mox.IgnoreArg()) self.compute._reschedule(self.context, None, {}, instance_uuid, self.compute.scheduler_rpcapi.run_instance, method_args, task_states.SCHEDULING, exc_info).AndReturn( |