From 7c847bc659e7c493cf009adc417be2e884c3c616 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Fri, 1 Jun 2012 02:52:57 +0000
Subject: Cleans up power_off and power_on semantics

compute.api changes:
 * improves state handling for delete/restore
 * removes hack to deal with SHUTOFF on start
 * fixes api tests (volume shouldn't detach on stop)

compute.manager changes:
 * uses power_off/power_on for stop/start

virt.libvirt changes:
 * implements power_off/power_on for libvirt
 * synchronizes usage of domain.create()
 * cleans up usage of instance.name
 * adds tests for power_on and power_off
 * fixes bug 1006950

Change-Id: I91845a643e3f97955e7c81ca57c6ee5aa0a3d295
---
 nova/compute/api.py                |   33 ++++++++----------------
 nova/compute/manager.py            |   51 ++++++++++++++----------------------
 nova/tests/api/ec2/test_cloud.py   |   10 ++++----
 nova/tests/compute/test_compute.py |   40 +++++++++++++++---------------
 nova/tests/test_virt_drivers.py    |   16 ++++++++++++
 nova/virt/fake.py                  |    6 +++++
 nova/virt/libvirt/connection.py    |   31 ++++++++++++++++-------
 7 files changed, 101 insertions(+), 86 deletions(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index a0672cfff..08954f7e2 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -967,7 +967,6 @@ class API(base.Base):
         if instance['host']:
             self.update(context,
                         instance,
-                        vm_state=vm_states.SOFT_DELETE,
                         task_state=task_states.POWERING_OFF,
                         deleted_at=utils.utcnow())
 
@@ -1047,15 +1046,18 @@ class API(base.Base):
     @check_instance_state(vm_state=[vm_states.SOFT_DELETE])
     def restore(self, context, instance):
         """Restore a previously deleted (but not reclaimed) instance."""
-        self.update(context,
-                    instance,
-                    vm_state=vm_states.ACTIVE,
-                    task_state=None,
-                    deleted_at=None)
-
         if instance['host']:
-            self.update(context, instance, task_state=task_states.POWERING_ON)
+            self.update(context,
+                        instance,
+                        task_state=task_states.POWERING_ON,
+                        deleted_at=None)
             self.compute_rpcapi.power_on_instance(context, instance)
+        else:
+            self.update(context,
+                        instance,
+                        vm_state=vm_states.ACTIVE,
+                        task_state=None,
+                        deleted_at=None)
 
     @wrap_check_policy
     @check_instance_state(vm_state=[vm_states.SOFT_DELETE])
@@ -1074,9 +1076,7 @@ class API(base.Base):
 
         self.update(context,
                     instance,
-                    vm_state=vm_states.ACTIVE,
                     task_state=task_states.STOPPING,
-                    terminated_at=utils.utcnow(),
                     progress=0)
 
         self.compute_rpcapi.stop_instance(context, instance, cast=do_cast)
@@ -1085,23 +1085,10 @@ class API(base.Base):
     @check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF])
     def start(self, context, instance):
         """Start an instance."""
-        vm_state = instance["vm_state"]
-        instance_uuid = instance["uuid"]
         LOG.debug(_("Going to try to start instance"), instance=instance)
 
-        if vm_state == vm_states.SHUTOFF:
-            if instance['shutdown_terminate']:
-                LOG.warning(_("Instance %(instance_uuid)s is not "
-                              "stopped. (%(vm_state)s") % locals())
-                return
-
-            # NOTE(yamahata): nova compute doesn't reap instances
-            # which initiated shutdown itself. So reap it here.
-            self.stop(context, instance, do_cast=False)
-
         self.update(context,
                     instance,
-                    vm_state=vm_states.STOPPED,
                     task_state=task_states.STARTING)
 
         # TODO(yamahata): injected_files isn't supported right now.
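
The api.py hunks above all follow one pattern: the API layer now records only the task being performed (STOPPING, STARTING, POWERING_ON, POWERING_OFF) and leaves the final vm_state transition to the compute manager once the power operation actually completes. The sketch below illustrates that intended flow; it is a simplified stand-in using plain dicts and direct function calls in place of Nova's DB updates and RPC casts, not Nova source (the real manager side is in the compute.manager hunks that follow).

# sketch_power_semantics.py -- illustrative only, not Nova code.
ACTIVE, STOPPED, SOFT_DELETE = 'active', 'stopped', 'soft-delete'
STOPPING, STARTING, POWERING_ON, POWERING_OFF = (
    'stopping', 'starting', 'powering-on', 'powering-off')


def api_stop(instance):
    # compute.api.stop(): flag the task only; the final vm_state is the
    # manager's job once the guest is actually powered off.
    instance['task_state'] = STOPPING
    manager_power_off(instance, final_state=STOPPED)   # rpc cast in Nova


def api_restore(instance):
    # compute.api.restore(): with a host, ask the manager to power the
    # guest on; without one, the record can go straight back to ACTIVE.
    if instance.get('host'):
        instance['task_state'] = POWERING_ON
        instance['deleted_at'] = None
        manager_power_on(instance)                      # rpc cast in Nova
    else:
        instance.update(vm_state=ACTIVE, task_state=None, deleted_at=None)


def manager_power_off(instance, final_state=SOFT_DELETE):
    # compute.manager.power_off_instance(): do the work, then record the
    # final vm_state and clear the task_state.
    instance.update(power_state='shutdown', vm_state=final_state,
                    task_state=None)


def manager_power_on(instance):
    # compute.manager.power_on_instance(): guest is running again.
    instance.update(power_state='running', vm_state=ACTIVE, task_state=None)


if __name__ == '__main__':
    inst = dict(host='compute1', vm_state=ACTIVE, task_state=None)
    api_stop(inst)
    assert (inst['vm_state'], inst['task_state']) == (STOPPED, None)
    inst['vm_state'] = SOFT_DELETE
    api_restore(inst)
    assert (inst['vm_state'], inst['task_state']) == (ACTIVE, None)

With this split, stop_instance and start_instance on the manager become thin aliases over power_off_instance/power_on_instance, which is exactly what the compute.manager hunks below do.
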
diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2776ee230..4735f4a76 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -701,24 +701,10 @@ class ComputeManager(manager.SchedulerDependentManager): self._run_instance(context, instance_uuid, **kwargs) do_run_instance() - @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) - @checks_instance_lock - @wrap_instance_fault - def start_instance(self, context, instance_uuid): - @utils.synchronized(instance_uuid) - def do_start_instance(): - """Starting an instance on this host.""" - # TODO(yamahata): injected_files isn't supported. - # Anyway OSAPI doesn't support stop/start yet - # FIXME(vish): I've kept the files during stop instance, but - # I think start will fail due to the files still - self._run_instance(context, instance_uuid) - do_start_instance() - - def _shutdown_instance(self, context, instance, action_str): + def _shutdown_instance(self, context, instance): """Shutdown an instance on this host.""" context = context.elevated() - LOG.audit(_('%(action_str)s instance') % {'action_str': action_str}, + LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'}, context=context, instance=instance) self._notify_about_instance_usage(context, instance, "shutdown.start") @@ -765,7 +751,7 @@ class ComputeManager(manager.SchedulerDependentManager): """Delete an instance on this host.""" instance_uuid = instance['uuid'] self._notify_about_instance_usage(context, instance, "delete.start") - self._shutdown_instance(context, instance, 'Terminating') + self._shutdown_instance(context, instance) self._cleanup_volumes(context, instance_uuid) instance = self._instance_update(context, instance_uuid, @@ -804,21 +790,26 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock @wrap_instance_fault def stop_instance(self, context, instance_uuid): - """Stopping an instance on this host.""" - @utils.synchronized(instance_uuid) - def do_stop_instance(): - instance = self.db.instance_get_by_uuid(context, instance_uuid) - self._shutdown_instance(context, instance, 'Stopping') - self._instance_update(context, - instance_uuid, - vm_state=vm_states.STOPPED, - task_state=None) - do_stop_instance() + """Stopping an instance on this host. + + Alias for power_off_instance for compatibility""" + self.power_off_instance(context, instance_uuid, + final_state=vm_states.STOPPED) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + @checks_instance_lock + @wrap_instance_fault + def start_instance(self, context, instance_uuid): + """Starting an instance on this host. 
+ + Alias for power_on_instance for compatibility""" + self.power_on_instance(context, instance_uuid) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock @wrap_instance_fault - def power_off_instance(self, context, instance_uuid): + def power_off_instance(self, context, instance_uuid, + final_state=vm_states.SOFT_DELETE): """Power off an instance on this host.""" instance = self.db.instance_get_by_uuid(context, instance_uuid) self._notify_about_instance_usage(context, instance, "power_off.start") @@ -827,6 +818,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_uuid, power_state=current_power_state, + vm_state=final_state, task_state=None) self._notify_about_instance_usage(context, instance, "power_off.end") @@ -842,6 +834,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_uuid, power_state=current_power_state, + vm_state=vm_states.ACTIVE, task_state=None) self._notify_about_instance_usage(context, instance, "power_on.end") @@ -2579,7 +2572,7 @@ class ComputeManager(manager.SchedulerDependentManager): "'%(name)s' which is marked as " "DELETED but still present on host."), locals(), instance=instance) - self._shutdown_instance(context, instance, 'Terminating') + self._shutdown_instance(context, instance) self._cleanup_volumes(context, instance['uuid']) else: raise Exception(_("Unrecognized value '%(action)s'" diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 58cba6d15..31f6abf5f 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -1963,9 +1963,10 @@ class CloudTestCase(test.TestCase): self.assertTrue(result) vol = db.volume_get(self.context, vol1['id']) - self._assert_volume_detached(vol) + self._assert_volume_attached(vol, instance_uuid, '/dev/vdb') + vol = db.volume_get(self.context, vol2['id']) - self._assert_volume_detached(vol) + self._assert_volume_attached(vol, instance_uuid, '/dev/vdc') self.cloud.start_instances(self.context, [ec2_instance_id]) vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid) @@ -2034,9 +2035,8 @@ class CloudTestCase(test.TestCase): result = self.cloud.stop_instances(self.context, [ec2_instance_id]) self.assertTrue(result) - for vol_id in (vol1['id'], vol2['id']): - vol = db.volume_get(self.context, vol_id) - self._assert_volume_detached(vol) + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_attached(vol, instance_uuid, '/dev/vdc') self.cloud.start_instances(self.context, [ec2_instance_id]) vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 6f03e9c84..03fee4703 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -445,7 +445,7 @@ class ComputeTestCase(BaseTestCase): def fake_driver_power_on(self, instance): called['power_on'] = True - self.stubs.Set(nova.virt.driver.ComputeDriver, 'power_on', + self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on', fake_driver_power_on) instance = self._create_fake_instance() @@ -463,7 +463,7 @@ class ComputeTestCase(BaseTestCase): def fake_driver_power_off(self, instance): called['power_off'] = True - self.stubs.Set(nova.virt.driver.ComputeDriver, 'power_off', + self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off', fake_driver_power_off) instance = self._create_fake_instance() @@ -1700,8 +1700,8 @@ class 
ComputeTestCase(BaseTestCase): ).AndReturn([instance]) self.mox.StubOutWithMock(self.compute, "_shutdown_instance") - self.compute._shutdown_instance(admin_context, instance, - 'Terminating').AndReturn(None) + self.compute._shutdown_instance(admin_context, + instance).AndReturn(None) self.mox.StubOutWithMock(self.compute, "_cleanup_volumes") self.compute._cleanup_volumes(admin_context, @@ -2256,13 +2256,8 @@ class ComputeAPITestCase(BaseTestCase): check_state(instance_uuid, power_state.NOSTATE, vm_states.SHUTOFF, None) - start_check_state(instance_uuid, - power_state.NOSTATE, vm_states.SHUTOFF, None) - - db.instance_update(self.context, instance_uuid, - {'shutdown_terminate': False}) start_check_state(instance_uuid, power_state.NOSTATE, - vm_states.STOPPED, task_states.STARTING) + vm_states.SHUTOFF, task_states.STARTING) db.instance_destroy(self.context, instance['id']) @@ -2338,6 +2333,10 @@ class ComputeAPITestCase(BaseTestCase): instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['task_state'], task_states.POWERING_OFF) + # set the state that the instance gets when soft_delete finishes + instance = db.instance_update(self.context, instance['uuid'], + {'vm_state': vm_states.SOFT_DELETE}) + self.compute_api.force_delete(self.context, instance) instance = db.instance_get_by_uuid(self.context, instance_uuid) @@ -2402,9 +2401,8 @@ class ComputeAPITestCase(BaseTestCase): self.compute.pause_instance(self.context, instance_uuid) # set the state that the instance gets when pause finishes - db.instance_update(self.context, instance['uuid'], + instance = db.instance_update(self.context, instance['uuid'], {'vm_state': vm_states.PAUSED}) - instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.compute_api.unpause(self.context, instance) @@ -2425,6 +2423,10 @@ class ComputeAPITestCase(BaseTestCase): instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['task_state'], task_states.POWERING_OFF) + # set the state that the instance gets when soft_delete finishes + instance = db.instance_update(self.context, instance['uuid'], + {'vm_state': vm_states.SOFT_DELETE}) + self.compute_api.restore(self.context, instance) instance = db.instance_get_by_uuid(self.context, instance_uuid) @@ -2766,10 +2768,9 @@ class ComputeAPITestCase(BaseTestCase): {'instance_uuid': instance['uuid'], 'status': 'finished'}) # set the state that the instance gets when resize finishes - db.instance_update(self.context, instance['uuid'], - {'task_state': task_states.RESIZE_VERIFY, - 'vm_state': vm_states.ACTIVE}) - instance = db.instance_get_by_uuid(context, instance['uuid']) + instance = db.instance_update(self.context, instance['uuid'], + {'task_state': task_states.RESIZE_VERIFY, + 'vm_state': vm_states.ACTIVE}) self.compute_api.confirm_resize(context, instance) self.compute.terminate_instance(context, instance['uuid']) @@ -2787,10 +2788,9 @@ class ComputeAPITestCase(BaseTestCase): {'instance_uuid': instance['uuid'], 'status': 'finished'}) # set the state that the instance gets when resize finishes - db.instance_update(self.context, instance['uuid'], - {'task_state': task_states.RESIZE_VERIFY, - 'vm_state': vm_states.ACTIVE}) - instance = db.instance_get_by_uuid(context, instance['uuid']) + instance = db.instance_update(self.context, instance['uuid'], + {'task_state': task_states.RESIZE_VERIFY, + 'vm_state': vm_states.ACTIVE}) self.compute_api.revert_resize(context, instance) diff --git a/nova/tests/test_virt_drivers.py 
b/nova/tests/test_virt_drivers.py index d4be2b463..774450e02 100644 --- a/nova/tests/test_virt_drivers.py +++ b/nova/tests/test_virt_drivers.py @@ -176,6 +176,22 @@ class _VirtDriverTestCase(test.TestCase): self.ctxt, instance_ref, 'dest_host', instance_type_ref, network_info) + @catch_notimplementederror + def test_power_off(self): + instance_ref, network_info = self._get_running_instance() + self.connection.power_off(instance_ref) + + @catch_notimplementederror + def test_test_power_on_running(self): + instance_ref, network_info = self._get_running_instance() + self.connection.power_on(instance_ref) + + @catch_notimplementederror + def test_test_power_on_powered_off(self): + instance_ref, network_info = self._get_running_instance() + self.connection.power_off(instance_ref) + self.connection.power_on(instance_ref) + @catch_notimplementederror def test_pause(self): instance_ref, network_info = self._get_running_instance() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 316a7769a..0e7a4c1e3 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -139,6 +139,12 @@ class FakeDriver(driver.ComputeDriver): def finish_revert_migration(self, instance, network_info): pass + def power_off(self, instance): + pass + + def power_on(self, instance): + pass + def pause(self, instance): pass diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 34729aeed..5c2077897 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -718,7 +718,7 @@ class LibvirtDriver(driver.ComputeDriver): finally: libvirt_utils.delete_snapshot(disk_path, snapshot_name) if state == power_state.RUNNING: - virt_dom.create() + self._create_domain(domain=virt_dom) # Upload that image to the image service with libvirt_utils.file_open(out_path) as image_file: @@ -770,7 +770,7 @@ class LibvirtDriver(driver.ComputeDriver): power_state.CRASHED]: LOG.info(_("Instance shutdown successfully."), instance=instance) - dom.create() + self._create_domain(domain=dom) timer = utils.LoopingCall(self._wait_for_running, instance) return timer.start(interval=0.5) greenthread.sleep(1) @@ -808,26 +808,39 @@ class LibvirtDriver(driver.ComputeDriver): @exception.wrap_exception() def pause(self, instance): """Pause VM instance""" - dom = self._lookup_by_name(instance.name) + dom = self._lookup_by_name(instance['name']) dom.suspend() @exception.wrap_exception() def unpause(self, instance): """Unpause paused VM instance""" - dom = self._lookup_by_name(instance.name) + dom = self._lookup_by_name(instance['name']) dom.resume() + @exception.wrap_exception() + def power_off(self, instance): + """Power off the specified instance""" + self._destroy(instance) + + @exception.wrap_exception() + def power_on(self, instance): + """Power on the specified instance""" + dom = self._lookup_by_name(instance['name']) + self._create_domain(domain=dom) + timer = utils.LoopingCall(self._wait_for_running, instance) + return timer.start(interval=0.5) + @exception.wrap_exception() def suspend(self, instance): """Suspend the specified instance""" - dom = self._lookup_by_name(instance.name) + dom = self._lookup_by_name(instance['name']) dom.managedSave(0) @exception.wrap_exception() def resume(self, instance): """resume the specified instance""" - dom = self._lookup_by_name(instance.name) - dom.create() + dom = self._lookup_by_name(instance['name']) + self._create_domain(domain=dom) @exception.wrap_exception() def resume_state_on_host_boot(self, context, instance, network_info): @@ -2253,7 +2266,7 @@ class 
LibvirtDriver(driver.ComputeDriver): post_method(ctxt, instance_ref, dest, block_migration) timer.f = wait_for_live_migration - timer.start(interval=0.5) + return timer.start(interval=0.5) def pre_live_migration(self, block_device_info): """Preparation live migration. @@ -2505,7 +2518,7 @@ class LibvirtDriver(driver.ComputeDriver): disk_info_text = self.get_instance_disk_info(instance['name']) disk_info = jsonutils.loads(disk_info_text) - self._destroy(instance) + self.power_off(instance) # copy disks to destination # if disk type is qcow2, convert to raw then send to dest. -- cgit
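
The libvirt hunks above replace direct dom.create() calls (in snapshot, reboot, resume, and the new power_on) with self._create_domain(domain=dom), which is how the commit's "synchronizes usage of domain.create()" bullet is realized; the helper's definition is not part of this excerpt. A hypothetical sketch of what such a serialized start helper could look like follows -- the name of the lock and the locking strategy are assumptions, not the actual Nova implementation.

# Hypothetical sketch only; the real _create_domain() used by the hunks
# above is not shown in this excerpt.
import threading

_create_lock = threading.Lock()    # assumed serialization primitive


def _create_domain(domain):
    """Start a libvirt domain, serializing concurrent create() calls."""
    with _create_lock:
        domain.create()            # virDomain.create() boots the guest
    return domain

In the driver itself, the new power_on()/power_off() pair then reduces to _lookup_by_name() plus _create_domain()/_destroy(), as the connection.py hunks show, and migrate_disk_and_power_off() reuses power_off() instead of calling _destroy() directly.
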