summary refs log tree commit diff stats
path: root/nova/compute
diff options
context:
space:
mode:
authorVishvananda Ishaya <vishvananda@gmail.com>2012-06-01 02:52:57 +0000
committerVishvananda Ishaya <vishvananda@gmail.com>2012-06-06 15:17:44 -0700
commit7c847bc659e7c493cf009adc417be2e884c3c616 (patch)
tree2042f2b65e03a3e6c6358671cd6b57443ffffeff /nova/compute
parentae878fc8b9761d099a4145617e4a48cbeb390623 (diff)
Cleans up power_off and power_on semantics
compute.api changes: * improves state handling for delete/restore * removes hack to deal with SHUTOFF on start * fixes api tests (volume shouldn't detach on stop) compute.manager changes: * uses power_off/power_on for stop/start virt.libvirt changes: * implements power_off/power_on for libvirt * synchronizes usage of domain.create() * cleans up usage of instance.name * added tests for power_on and power_off * fixes bug 1006950 Change-Id: I91845a643e3f97955e7c81ca57c6ee5aa0a3d295
Diffstat (limited to 'nova/compute')
-rw-r--r--nova/compute/api.py33
-rw-r--r--nova/compute/manager.py51
2 files changed, 32 insertions, 52 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index a0672cfff..08954f7e2 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -967,7 +967,6 @@ class API(base.Base):
if instance['host']:
self.update(context,
instance,
- vm_state=vm_states.SOFT_DELETE,
task_state=task_states.POWERING_OFF,
deleted_at=utils.utcnow())
@@ -1047,15 +1046,18 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.SOFT_DELETE])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
- self.update(context,
- instance,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- deleted_at=None)
-
if instance['host']:
- self.update(context, instance, task_state=task_states.POWERING_ON)
+ self.update(context,
+ instance,
+ task_state=task_states.POWERING_ON,
+ deleted_at=None)
self.compute_rpcapi.power_on_instance(context, instance)
+ else:
+ self.update(context,
+ instance,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ deleted_at=None)
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.SOFT_DELETE])
@@ -1074,9 +1076,7 @@ class API(base.Base):
self.update(context,
instance,
- vm_state=vm_states.ACTIVE,
task_state=task_states.STOPPING,
- terminated_at=utils.utcnow(),
progress=0)
self.compute_rpcapi.stop_instance(context, instance, cast=do_cast)
@@ -1085,23 +1085,10 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF])
def start(self, context, instance):
"""Start an instance."""
- vm_state = instance["vm_state"]
- instance_uuid = instance["uuid"]
LOG.debug(_("Going to try to start instance"), instance=instance)
- if vm_state == vm_states.SHUTOFF:
- if instance['shutdown_terminate']:
- LOG.warning(_("Instance %(instance_uuid)s is not "
- "stopped. (%(vm_state)s") % locals())
- return
-
- # NOTE(yamahata): nova compute doesn't reap instances
- # which initiated shutdown itself. So reap it here.
- self.stop(context, instance, do_cast=False)
-
self.update(context,
instance,
- vm_state=vm_states.STOPPED,
task_state=task_states.STARTING)
# TODO(yamahata): injected_files isn't supported right now.
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 2776ee230..4735f4a76 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -701,24 +701,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self._run_instance(context, instance_uuid, **kwargs)
do_run_instance()
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @checks_instance_lock
- @wrap_instance_fault
- def start_instance(self, context, instance_uuid):
- @utils.synchronized(instance_uuid)
- def do_start_instance():
- """Starting an instance on this host."""
- # TODO(yamahata): injected_files isn't supported.
- # Anyway OSAPI doesn't support stop/start yet
- # FIXME(vish): I've kept the files during stop instance, but
- # I think start will fail due to the files still
- self._run_instance(context, instance_uuid)
- do_start_instance()
-
- def _shutdown_instance(self, context, instance, action_str):
+ def _shutdown_instance(self, context, instance):
"""Shutdown an instance on this host."""
context = context.elevated()
- LOG.audit(_('%(action_str)s instance') % {'action_str': action_str},
+ LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "shutdown.start")
@@ -765,7 +751,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Delete an instance on this host."""
instance_uuid = instance['uuid']
self._notify_about_instance_usage(context, instance, "delete.start")
- self._shutdown_instance(context, instance, 'Terminating')
+ self._shutdown_instance(context, instance)
self._cleanup_volumes(context, instance_uuid)
instance = self._instance_update(context,
instance_uuid,
@@ -804,21 +790,26 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
@wrap_instance_fault
def stop_instance(self, context, instance_uuid):
- """Stopping an instance on this host."""
- @utils.synchronized(instance_uuid)
- def do_stop_instance():
- instance = self.db.instance_get_by_uuid(context, instance_uuid)
- self._shutdown_instance(context, instance, 'Stopping')
- self._instance_update(context,
- instance_uuid,
- vm_state=vm_states.STOPPED,
- task_state=None)
- do_stop_instance()
+ """Stopping an instance on this host.
+
+ Alias for power_off_instance for compatibility"""
+ self.power_off_instance(context, instance_uuid,
+ final_state=vm_states.STOPPED)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @checks_instance_lock
+ @wrap_instance_fault
+ def start_instance(self, context, instance_uuid):
+ """Starting an instance on this host.
+
+ Alias for power_on_instance for compatibility"""
+ self.power_on_instance(context, instance_uuid)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def power_off_instance(self, context, instance_uuid):
+ def power_off_instance(self, context, instance_uuid,
+ final_state=vm_states.SOFT_DELETE):
"""Power off an instance on this host."""
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "power_off.start")
@@ -827,6 +818,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self._instance_update(context,
instance_uuid,
power_state=current_power_state,
+ vm_state=final_state,
task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
@@ -842,6 +834,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self._instance_update(context,
instance_uuid,
power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
task_state=None)
self._notify_about_instance_usage(context, instance, "power_on.end")
@@ -2579,7 +2572,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"'%(name)s' which is marked as "
"DELETED but still present on host."),
locals(), instance=instance)
- self._shutdown_instance(context, instance, 'Terminating')
+ self._shutdown_instance(context, instance)
self._cleanup_volumes(context, instance['uuid'])
else:
raise Exception(_("Unrecognized value '%(action)s'"