author     Yun Mao <yunmao@gmail.com>    2012-06-05 14:55:34 -0400
committer  Yun Mao <yunmao@gmail.com>    2012-06-21 15:34:53 -0400
commit     129b87e17d3333aeaa9e855a70dea51e6581ea63 (patch)
tree       052999c17d4402237e57218dd8e708d6a1723836
parent     1ecf8311f817cf1c5b3b6f0efe7c022da1950187 (diff)
vm state and task state management
partially implements bp task-management
fixes bug 997867
also see http://wiki.openstack.org/VMState

Refactored the following API/state:
 * rebuild
 * migrate
 * resize
 * start
 * stop
 * delete
 * soft delete
 * rework sync_power_state in compute/manager

Fix broken tests; add a transition diagram in dot.

Change-Id: I3c5a97508a6dad7175fba12828bd3fa6ef1e50ee
 doc/source/devref/vmstates.rst                                  | 137
 nova/api/ec2/cloud.py                                           |  25
 nova/api/ec2/inst_state.py                                      |   2
 nova/api/openstack/common.py                                    |  27
 nova/api/openstack/compute/contrib/floating_ips.py              |   2
 nova/compute/api.py                                             |  60
 nova/compute/manager.py                                         | 182
 nova/compute/task_states.py                                     |  44
 nova/compute/vm_states.py                                       |  35
 nova/db/sqlalchemy/api.py                                       |   7
 nova/scheduler/driver.py                                        |   4
 nova/tests/api/ec2/test_cloud.py                                |   2
 nova/tests/api/openstack/compute/contrib/test_admin_actions.py  |   3
 nova/tests/api/openstack/compute/test_server_actions.py         |   5
 nova/tests/api/openstack/compute/test_servers.py                |  24
 nova/tests/compute/test_compute.py                              |  40
 nova/tests/scheduler/test_scheduler.py                          |   5
 nova/tests/test_imagecache.py                                   |  12
 nova/virt/libvirt/imagecache.py                                 |   9
 nova/virt/xenapi/host.py                                        |   3
 20 files changed, 423 insertions(+), 205 deletions(-)
diff --git a/doc/source/devref/vmstates.rst b/doc/source/devref/vmstates.rst
index 6b9d0c705..9733b44a9 100644
--- a/doc/source/devref/vmstates.rst
+++ b/doc/source/devref/vmstates.rst
@@ -8,6 +8,143 @@ Preconditions for commands
The following diagrams show the required virtual machine (VM) states and
task states for various commands issued by the user:
+.. graphviz::
+
+ digraph states {
+ node [fontsize=10 fontname="Monospace"]
+ /* states */
+ building [label="BUILDING"]
+
+ active [label="ACTIVE"]
+ paused [label="PAUSED"]
+ suspended [label="SUSPENDED"]
+ stopped [label="STOPPED"]
+ rescued [label="RESCUED"]
+ resized [label="RESIZED"]
+ soft_deleted [label="SOFT_DELETED"]
+ deleted [label="DELETED"]
+ error [label="ERROR"]
+
+ /* apis */
+ create [shape="rectangle"]
+ create -> active
+ create -> error
+ building -> create
+
+ delete [shape="rectangle"]
+ delete -> deleted
+ building -> delete
+ paused -> delete
+ suspended -> delete
+ stopped -> delete
+ rescued -> delete
+ soft_deleted -> delete
+ error -> delete
+
+ soft_delete [shape="rectangle"]
+ soft_delete -> soft_deleted
+ soft_delete -> error
+ active -> soft_delete
+ stopped -> soft_delete
+
+ restore [shape="rectangle"]
+ restore -> active
+ restore -> error
+ soft_deleted -> restore
+
+ pause [shape="rectangle"]
+ pause -> paused
+ pause -> error
+ active -> pause
+
+ unpause [shape="rectangle"]
+ unpause -> active
+ unpause -> error
+ paused -> unpause
+
+ suspend [shape="rectangle"]
+ suspend -> suspended
+ suspend -> error
+ active -> suspend
+
+ resume [shape="rectangle"]
+ resume -> active
+ resume -> error
+ suspended -> resume
+
+ start [shape="rectangle"]
+ start -> active
+ start -> error
+ stopped -> start
+
+ stop [shape="rectangle"]
+ stop -> stopped
+ stop -> error
+ active -> stop
+
+ rescue [shape="rectangle"]
+ rescue -> rescued
+ rescue -> error
+ active -> rescue
+ stopped -> rescue
+
+ unrescue [shape="rectangle"]
+ unrescue -> active
+ rescued -> unrescue
+
+ resize [shape="rectangle"]
+ resize -> resized
+ resize -> error
+ active -> resize
+ stopped -> resize
+
+ confirm_resize [shape="rectangle"]
+ confirm_resize -> active
+ confirm_resize -> error
+ resized -> confirm_resize
+      revert_resize [shape="rectangle"]
+
+ revert_resize -> active
+ revert_resize -> error
+ resized -> revert_resize
+
+ snapshot [shape="rectangle"]
+ snapshot -> active
+ snapshot -> stopped
+ snapshot -> error
+ active -> snapshot
+ stopped -> snapshot
+
+ backup [shape="rectangle"]
+ backup -> active
+ backup -> stopped
+ backup -> error
+ active -> backup
+ stopped -> backup
+
+ rebuild [shape="rectangle"]
+ rebuild -> active
+ rebuild -> error
+ active -> rebuild
+ stopped -> rebuild
+
+ set_admin_password [shape="rectangle"]
+ set_admin_password -> active
+ set_admin_password -> error
+ active -> set_admin_password
+
+ reboot [shape="rectangle"]
+ reboot -> active
+ reboot -> error
+ active -> reboot
+ stopped -> reboot
+ rescued -> reboot
+
+ live_migrate [shape="rectangle"]
+ live_migrate -> active
+ live_migrate -> error
+ active -> live_migrate
+ }
.. image:: /images/PowerStates1.png
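
Read each rectangle above as an API call: edges into the rectangle are the
vm_states the call may start from, and edges out are its possible outcomes.
For cross-reference, the same transition table as an illustrative Python
sketch (not Nova code; note that compute/api.py in this same commit relaxes
delete() to run from any vm_state):

    # Illustrative sketch (not Nova code): the diagram's transition table,
    # keyed by API call -> vm_states the call may be invoked from.
    ALLOWED_SOURCES = {
        'create': {'BUILDING'},
        'delete': {'BUILDING', 'PAUSED', 'SUSPENDED', 'STOPPED',
                   'RESCUED', 'SOFT_DELETED', 'ERROR'},
        'soft_delete': {'ACTIVE', 'STOPPED'},
        'restore': {'SOFT_DELETED'},
        'pause': {'ACTIVE'},
        'unpause': {'PAUSED'},
        'suspend': {'ACTIVE'},
        'resume': {'SUSPENDED'},
        'start': {'STOPPED'},
        'stop': {'ACTIVE'},
        'rescue': {'ACTIVE', 'STOPPED'},
        'unrescue': {'RESCUED'},
        'resize': {'ACTIVE', 'STOPPED'},
        'confirm_resize': {'RESIZED'},
        'revert_resize': {'RESIZED'},
        'snapshot': {'ACTIVE', 'STOPPED'},
        'backup': {'ACTIVE', 'STOPPED'},
        'rebuild': {'ACTIVE', 'STOPPED'},
        'set_admin_password': {'ACTIVE'},
        'reboot': {'ACTIVE', 'STOPPED', 'RESCUED'},
        'live_migrate': {'ACTIVE'},
    }

    def allowed(api_call, vm_state):
        """True if the diagram permits invoking api_call from vm_state."""
        return vm_state in ALLOWED_SOURCES.get(api_call, set())

    assert allowed('reboot', 'RESCUED')
    assert not allowed('start', 'ACTIVE')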
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index cfbb0894f..ac569652d 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -71,26 +71,22 @@ _STATE_DESCRIPTION_MAP = {
None: inst_state.PENDING,
vm_states.ACTIVE: inst_state.RUNNING,
vm_states.BUILDING: inst_state.PENDING,
- vm_states.REBUILDING: inst_state.PENDING,
vm_states.DELETED: inst_state.TERMINATED,
- vm_states.SOFT_DELETE: inst_state.TERMINATED,
+ vm_states.SOFT_DELETED: inst_state.TERMINATED,
vm_states.STOPPED: inst_state.STOPPED,
- vm_states.SHUTOFF: inst_state.SHUTOFF,
- vm_states.MIGRATING: inst_state.MIGRATE,
- vm_states.RESIZING: inst_state.RESIZE,
vm_states.PAUSED: inst_state.PAUSE,
vm_states.SUSPENDED: inst_state.SUSPEND,
vm_states.RESCUED: inst_state.RESCUE,
+ vm_states.RESIZED: inst_state.RESIZE,
}
-def _state_description(vm_state, shutdown_terminate):
+def _state_description(vm_state, _shutdown_terminate):
"""Map the vm state to the server status string"""
- if (vm_state == vm_states.SHUTOFF and
- not shutdown_terminate):
- name = inst_state.STOPPED
- else:
- name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
+    # Note(maoy): we do not provide EC2 compatibility for the
+    # shutdown_terminate flag's behavior, so the flag is ignored here.
+ name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name),
'name': name}
@@ -1138,7 +1134,7 @@ class CloudController(object):
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed.')
raise exception.EC2APIError(msg)
- except:
+ except Exception:
msg = _('Error, unable to associate floating ip.')
raise exception.EC2APIError(msg)
@@ -1453,11 +1449,10 @@ class CloudController(object):
vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
- if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF,
- vm_states.STOPPED):
+ if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
- if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF):
+ if vm_state == vm_states.ACTIVE:
restart_instance = True
self.compute_api.stop(context, instance)
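
With the SHUTOFF special case gone, _state_description() is a pure table
lookup in which unknown vm_states pass through unchanged. A self-contained
sketch of that behavior, with the string values inlined for illustration
(the real constants live in nova/api/ec2/inst_state.py and
nova/compute/vm_states.py):

    # Self-contained sketch of the simplified lookup above.
    EC2_STATE_FOR_VM_STATE = {
        None: 'pending',
        'active': 'running',
        'building': 'pending',
        'deleted': 'terminated',
        'soft-delete': 'terminated',
        'stopped': 'stopped',
        'paused': 'pause',
        'suspended': 'suspend',
        'rescued': 'rescue',
        'resized': 'resize',
    }

    def state_description(vm_state):
        # Unknown vm_states fall through unchanged, mirroring
        # _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) in the patch.
        return EC2_STATE_FOR_VM_STATE.get(vm_state, vm_state)

    assert state_description('active') == 'running'
    assert state_description('unknown-state') == 'unknown-state'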
diff --git a/nova/api/ec2/inst_state.py b/nova/api/ec2/inst_state.py
index 68d18c8ad..1451bbfba 100644
--- a/nova/api/ec2/inst_state.py
+++ b/nova/api/ec2/inst_state.py
@@ -30,7 +30,6 @@ STOPPING = 'stopping'
STOPPED = 'stopped'
# non-ec2 value
-SHUTOFF = 'shutoff'
MIGRATE = 'migrate'
RESIZE = 'resize'
PAUSE = 'pause'
@@ -47,7 +46,6 @@ _NAME_TO_CODE = {
STOPPED: STOPPED_CODE,
# approximation
- SHUTOFF: TERMINATED_CODE,
MIGRATE: RUNNING_CODE,
RESIZE: RUNNING_CODE,
PAUSE: STOPPED_CODE,
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 93b5a36ab..eacd9b8cb 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -48,25 +48,26 @@ _STATE_MAP = {
task_states.REBOOTING: 'REBOOT',
task_states.REBOOTING_HARD: 'HARD_REBOOT',
task_states.UPDATING_PASSWORD: 'PASSWORD',
- task_states.RESIZE_VERIFY: 'VERIFY_RESIZE',
+ task_states.REBUILDING: 'REBUILD',
+ task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
+ task_states.REBUILD_SPAWNING: 'REBUILD',
+ task_states.MIGRATING: 'MIGRATING',
+ task_states.RESIZE_PREP: 'RESIZE',
+ task_states.RESIZE_MIGRATING: 'RESIZE',
+ task_states.RESIZE_MIGRATED: 'RESIZE',
+ task_states.RESIZE_FINISH: 'RESIZE',
},
vm_states.BUILDING: {
'default': 'BUILD',
},
- vm_states.REBUILDING: {
- 'default': 'REBUILD',
- },
vm_states.STOPPED: {
'default': 'STOPPED',
},
- vm_states.SHUTOFF: {
- 'default': 'SHUTOFF',
- },
- vm_states.MIGRATING: {
- 'default': 'MIGRATING',
- },
- vm_states.RESIZING: {
- 'default': 'RESIZE',
+ vm_states.RESIZED: {
+ 'default': 'VERIFY_RESIZE',
+ # Note(maoy): the OS API spec 1.1 doesn't have CONFIRMING_RESIZE
+ # state so we comment that out for future reference only.
+ #task_states.RESIZE_CONFIRMING: 'CONFIRMING_RESIZE',
task_states.RESIZE_REVERTING: 'REVERT_RESIZE',
},
vm_states.PAUSED: {
@@ -84,7 +85,7 @@ _STATE_MAP = {
vm_states.DELETED: {
'default': 'DELETED',
},
- vm_states.SOFT_DELETE: {
+ vm_states.SOFT_DELETED: {
'default': 'DELETED',
},
}
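
The map is consulted in two steps: first by vm_state, then by task_state,
falling back to the per-vm_state 'default' entry. A minimal sketch of that
resolution (map contents abbreviated; see the full table above):

    # Minimal sketch of the two-level (vm_state, task_state) -> status
    # lookup; dictionary contents abbreviated for illustration.
    _STATE_MAP = {
        'active': {'default': 'ACTIVE',
                   'rebuilding': 'REBUILD',
                   'resize_prep': 'RESIZE'},
        'resized': {'default': 'VERIFY_RESIZE',
                    'resize_reverting': 'REVERT_RESIZE'},
    }

    def status_from_state(vm_state, task_state=None):
        """Resolve a server status, falling back to the vm_state default."""
        task_map = _STATE_MAP.get(vm_state, {'default': 'UNKNOWN'})
        return task_map.get(task_state, task_map['default'])

    assert status_from_state('active') == 'ACTIVE'
    assert status_from_state('active', 'rebuilding') == 'REBUILD'
    assert status_from_state('resized', 'resize_reverting') == 'REVERT_RESIZE'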
diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py
index a3dac71ae..f4dfe2ecd 100644
--- a/nova/api/openstack/compute/contrib/floating_ips.py
+++ b/nova/api/openstack/compute/contrib/floating_ips.py
@@ -253,7 +253,7 @@ class FloatingIPActionController(wsgi.Controller):
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed')
raise webob.exc.HTTPBadRequest(explanation=msg)
- except:
+ except Exception:
msg = _('Error. Unable to associate floating ip')
raise webob.exc.HTTPBadRequest(explanation=msg)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 0f8375058..caec8f4b6 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -62,7 +62,7 @@ flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
QUOTAS = quota.QUOTAS
-def check_instance_state(vm_state=None, task_state=None):
+def check_instance_state(vm_state=None, task_state=(None,)):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, the wrapper will raise an exception.
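
The new default task_state=(None,) makes "no task in flight" the required
condition unless a caller opts out; passing None for either argument skips
that check entirely. As a hedged sketch (stand-in exception class, not the
exact Nova implementation), such a decorator can be structured like this:

    import functools

    class InstanceInvalidState(Exception):
        """Stand-in for nova.exception.InstanceInvalidState (sketch only)."""

    def check_instance_state(vm_state=None, task_state=(None,)):
        """Reject a compute API call when the instance is in a disallowed
        vm_state or task_state; None for either argument skips that check."""
        def outer(func):
            @functools.wraps(func)
            def inner(self, context, instance, *args, **kwargs):
                if vm_state is not None and \
                        instance['vm_state'] not in vm_state:
                    raise InstanceInvalidState(
                        'vm_state %r not allowed for %s'
                        % (instance['vm_state'], func.__name__))
                if task_state is not None and \
                        instance['task_state'] not in task_state:
                    raise InstanceInvalidState(
                        'task_state %r not allowed for %s'
                        % (instance['task_state'], func.__name__))
                return func(self, context, instance, *args, **kwargs)
            return inner
        return outer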
@@ -811,7 +811,7 @@ class API(base.Base):
return dict(instance_ref.iteritems())
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def soft_delete(self, context, instance):
"""Terminate an instance."""
@@ -865,7 +865,7 @@ class API(base.Base):
task_state=task_states.DELETING,
progress=0)
- if instance['task_state'] == task_states.RESIZE_VERIFY:
+ if instance['vm_state'] == vm_states.RESIZED:
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
migration_ref = self.db.migration_get_by_instance_and_status(
@@ -887,13 +887,9 @@ class API(base.Base):
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
- # NOTE(jerdfelt): The API implies that only ACTIVE and ERROR are
- # allowed but the EC2 API appears to allow from RESCUED and STOPPED
- # too
+ # NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.BUILDING,
- vm_states.ERROR, vm_states.RESCUED,
- vm_states.SHUTOFF, vm_states.STOPPED])
+ @check_instance_state(vm_state=None, task_state=None)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
@@ -904,7 +900,7 @@ class API(base.Base):
self._delete(context, instance)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.SOFT_DELETE])
+ @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
if instance['host']:
@@ -921,14 +917,14 @@ class API(base.Base):
deleted_at=None)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.SOFT_DELETE])
+ @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete(context, instance)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
- vm_states.RESCUED],
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
+ vm_states.ERROR, vm_states.STOPPED],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
@@ -943,7 +939,7 @@ class API(base.Base):
self.compute_rpcapi.stop_instance(context, instance, cast=do_cast)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.STOPPED, vm_states.SHUTOFF])
+ @check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
@@ -1088,7 +1084,7 @@ class API(base.Base):
sort_dir)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
@@ -1106,7 +1102,7 @@ class API(base.Base):
return recv_meta
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
@@ -1201,7 +1197,7 @@ class API(base.Base):
return min_ram, min_disk
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.RESCUED],
task_state=[None])
def reboot(self, context, instance, reboot_type):
@@ -1222,7 +1218,7 @@ class API(base.Base):
return image_service.show(context, image_id)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
@@ -1270,11 +1266,10 @@ class API(base.Base):
self.update(context,
instance,
- vm_state=vm_states.REBUILDING,
+ task_state=task_states.REBUILDING,
# Unfortunately we need to set image_ref early,
# so API users can see it.
image_ref=image_href,
- task_state=None,
progress=0,
**kwargs)
@@ -1288,8 +1283,7 @@ class API(base.Base):
image_ref=image_href, orig_image_ref=orig_image_ref)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
- task_state=[task_states.RESIZE_VERIFY])
+ @check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
context = context.elevated()
@@ -1301,7 +1295,6 @@ class API(base.Base):
self.update(context,
instance,
- vm_state=vm_states.RESIZING,
task_state=task_states.RESIZE_REVERTING)
self.compute_rpcapi.revert_resize(context,
@@ -1312,8 +1305,7 @@ class API(base.Base):
{'status': 'reverted'})
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
- task_state=[task_states.RESIZE_VERIFY])
+ @check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
context = context.elevated()
@@ -1338,7 +1330,7 @@ class API(base.Base):
{'host': migration_ref['dest_compute'], })
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None, **kwargs):
"""Resize (ie, migrate) a running instance.
@@ -1385,7 +1377,6 @@ class API(base.Base):
self.update(context,
instance,
- vm_state=vm_states.RESIZING,
task_state=task_states.RESIZE_PREP,
progress=0,
**kwargs)
@@ -1424,9 +1415,7 @@ class API(base.Base):
instance=instance, address=address)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
- vm_states.RESCUED],
- task_state=[None])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
self.update(context,
@@ -1451,9 +1440,7 @@ class API(base.Base):
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
- vm_states.RESCUED],
- task_state=[None])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
self.update(context,
@@ -1473,9 +1460,7 @@ class API(base.Base):
self.compute_rpcapi.resume_instance(context, instance=instance)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
- vm_states.STOPPED],
- task_state=[None])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
self.update(context,
@@ -1497,8 +1482,7 @@ class API(base.Base):
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE],
- task_state=[None])
+ @check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7e5070bce..a9816817b 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -811,7 +811,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
@wrap_instance_fault
def power_off_instance(self, context, instance_uuid,
- final_state=vm_states.SOFT_DELETE):
+ final_state=vm_states.SOFT_DELETED):
"""Power off an instance on this host."""
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "power_off.start")
@@ -895,16 +895,15 @@ class ComputeManager(manager.SchedulerDependentManager):
self._instance_update(context,
instance_uuid,
power_state=current_power_state,
- vm_state=vm_states.REBUILDING,
- task_state=None)
+ task_state=task_states.REBUILDING)
network_info = self._get_instance_nw_info(context, instance)
self.driver.destroy(instance, self._legacy_nw_info(network_info))
instance = self._instance_update(context,
instance_uuid,
- vm_state=vm_states.REBUILDING,
- task_state=task_states.BLOCK_DEVICE_MAPPING)
+ task_state=task_states.\
+ REBUILD_BLOCK_DEVICE_MAPPING)
instance.injected_files = kwargs.get('injected_files', [])
network_info = self.network_api.get_instance_nw_info(context,
@@ -913,8 +912,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance = self._instance_update(context,
instance_uuid,
- vm_state=vm_states.REBUILDING,
- task_state=task_states.SPAWNING)
+ task_state=task_states.\
+ REBUILD_SPAWNING)
# pull in new password here since the original password isn't in the db
instance.admin_pass = kwargs.get('new_pass',
utils.generate_password(FLAGS.password_length))
@@ -1459,10 +1458,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self._instance_update(context,
instance_ref.uuid,
- vm_state=vm_states.ACTIVE,
+ vm_state=vm_states.RESIZED,
host=migration_ref['dest_compute'],
launched_at=timeutils.utcnow(),
- task_state=task_states.RESIZE_VERIFY)
+ task_state=None)
self.db.migration_update(context, migration_ref.id,
{'status': 'finished'})
@@ -2340,9 +2339,10 @@ class ComputeManager(manager.SchedulerDependentManager):
_set_migration_to_error(migration_id, reason % locals(),
instance=instance)
continue
- if instance['task_state'] != task_states.RESIZE_VERIFY:
- state = instance['task_state']
- reason = _("In %(state)s task_state, not RESIZE_VERIFY")
+            if instance['vm_state'] != vm_states.RESIZED \
+                    or instance['task_state'] is not None:
+ state = instance['vm_state']
+ reason = _("In %(state)s vm_state, not RESIZED")
_set_migration_to_error(migration_id, reason % locals(),
instance=instance)
continue
@@ -2407,7 +2407,7 @@ class ComputeManager(manager.SchedulerDependentManager):
each loop to allow the periodic task eventlet to do other work.
If the instance is not found on the hypervisor, but is in the database,
- then it will be set to power_state.NOSTATE.
+        then the stop() API will be called on the instance.
"""
db_instances = self.db.instance_get_all_by_host(context, self.host)
@@ -2422,68 +2422,114 @@ class ComputeManager(manager.SchedulerDependentManager):
# Allow other periodic tasks to do some work...
greenthread.sleep(0)
db_power_state = db_instance['power_state']
+ if db_instance['task_state'] is not None:
+ LOG.info(_("During sync_power_state the instance has a "
+ "pending task. Skip."), instance=db_instance)
+ continue
+ # No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance['state']
except exception.InstanceNotFound:
- # This exception might have been caused by a race condition
- # between _sync_power_states and live migrations. Two cases
- # are possible as documented below. To this aim, refresh the
- # DB instance state.
- try:
- u = self.db.instance_get_by_uuid(context,
- db_instance['uuid'])
- if self.host != u['host']:
- # on the sending end of nova-compute _sync_power_state
- # may have yielded to the greenthread performing a live
- # migration; this in turn has changed the resident-host
- # for the VM; However, the instance is still active, it
- # is just in the process of migrating to another host.
- # This implies that the compute source must relinquish
- # control to the compute destination.
- LOG.info(_("During the sync_power process the "
- "instance has moved from "
- "host %(src)s to host %(dst)s") %
- {'src': self.host,
- 'dst': u['host']},
- instance=db_instance)
- elif (u['host'] == self.host and
- u['vm_state'] == vm_states.MIGRATING):
- # on the receiving end of nova-compute, it could happen
- # that the DB instance already report the new resident
- # but the actual VM has not showed up on the hypervisor
- # yet. In this case, let's allow the loop to continue
- # and run the state sync in a later round
- LOG.info(_("Instance is in the process of "
- "migrating to this host. Wait next "
- "sync_power cycle before setting "
- "power state to NOSTATE"),
- instance=db_instance)
- else:
- LOG.warn(_("Instance found in database but not "
- "known by hypervisor. Setting power "
- "state to NOSTATE"), locals(),
- instance=db_instance)
- vm_power_state = power_state.NOSTATE
- except exception.InstanceNotFound:
- # no need to update vm_state for deleted instances
- continue
-
- if vm_power_state == db_power_state:
+ vm_power_state = power_state.NOSTATE
+ # Note(maoy): the above get_info call might take a long time,
+ # for example, because of a broken libvirt driver.
+ # We re-query the DB to get the latest instance info to minimize
+            # (not eliminate) race conditions.
+ u = self.db.instance_get_by_uuid(context,
+ db_instance['uuid'])
+ db_power_state = u["power_state"]
+ vm_state = u['vm_state']
+ if self.host != u['host']:
+ # on the sending end of nova-compute _sync_power_state
+ # may have yielded to the greenthread performing a live
+ # migration; this in turn has changed the resident-host
+ # for the VM; However, the instance is still active, it
+ # is just in the process of migrating to another host.
+ # This implies that the compute source must relinquish
+ # control to the compute destination.
+ LOG.info(_("During the sync_power process the "
+ "instance has moved from "
+ "host %(src)s to host %(dst)s") %
+ {'src': self.host,
+ 'dst': u['host']},
+ instance=db_instance)
continue
-
- if (vm_power_state in (power_state.NOSTATE,
- power_state.SHUTDOWN,
- power_state.CRASHED)
- and db_instance['vm_state'] == vm_states.ACTIVE):
- self._instance_update(context,
- db_instance['uuid'],
- power_state=vm_power_state,
- vm_state=vm_states.SHUTOFF)
- else:
+ elif u['task_state'] is not None:
+ # on the receiving end of nova-compute, it could happen
+ # that the DB instance already report the new resident
+ # but the actual VM has not showed up on the hypervisor
+ # yet. In this case, let's allow the loop to continue
+ # and run the state sync in a later round
+ LOG.info(_("During sync_power_state the instance has a "
+ "pending task. Skip."), instance=db_instance)
+ continue
+ if vm_power_state != db_power_state:
+ # power_state is always updated from hypervisor to db
self._instance_update(context,
db_instance['uuid'],
power_state=vm_power_state)
+ db_power_state = vm_power_state
+ # Note(maoy): Now resolve the discrepancy between vm_state and
+ # vm_power_state. We go through all possible vm_states.
+ if vm_state in (vm_states.BUILDING,
+ vm_states.RESCUED,
+ vm_states.RESIZED,
+ vm_states.SUSPENDED,
+ vm_states.PAUSED,
+ vm_states.ERROR):
+                # TODO(maoy): we ignore these vm_states for now.
+ pass
+ elif vm_state == vm_states.ACTIVE:
+ # The only rational power state should be RUNNING
+ if vm_power_state in (power_state.NOSTATE,
+ power_state.SHUTDOWN,
+ power_state.CRASHED):
+                    LOG.warn(_("Instance shut down by itself. Calling "
+ "the stop API."), instance=db_instance)
+ try:
+ # Note(maoy): here we call the API instead of
+ # brutally updating the vm_state in the database
+ # to allow all the hooks and checks to be performed.
+ self.compute_api.stop(context, db_instance)
+ except Exception:
+                        # Note(maoy): there is no need to propagate the error
+ # because the same power_state will be retrieved next
+ # time and retried.
+ # For example, there might be another task scheduled.
+ LOG.exception(_("error during stop() in "
+ "sync_power_state."))
+ elif vm_power_state in (power_state.PAUSED,
+ power_state.SUSPENDED):
+ LOG.warn(_("Instance is paused or suspended "
+ "unexpectedly. Calling "
+ "the stop API."), instance=db_instance)
+ try:
+ self.compute_api.stop(context, db_instance)
+ except Exception:
+ LOG.exception(_("error during stop() in "
+ "sync_power_state."))
+ elif vm_state == vm_states.STOPPED:
+ if vm_power_state not in (power_state.NOSTATE,
+ power_state.SHUTDOWN,
+ power_state.CRASHED):
+ LOG.warn(_("Instance is not stopped. Calling "
+ "the stop API."), instance=db_instance)
+ try:
+ # Note(maoy): this assumes that the stop API is
+ # idempotent.
+ self.compute_api.stop(context, db_instance)
+ except Exception:
+ LOG.exception(_("error during stop() in "
+ "sync_power_state."))
+ elif vm_state in (vm_states.SOFT_DELETED,
+ vm_states.DELETED):
+ if vm_power_state not in (power_state.NOSTATE,
+ power_state.SHUTDOWN):
+ # Note(maoy): this should be taken care of periodically in
+ # _cleanup_running_deleted_instances().
+ LOG.warn(_("Instance is not (soft-)deleted."),
+ instance=db_instance)
@manager.periodic_task
def _reclaim_queued_deletes(self, context):
@@ -2498,7 +2544,7 @@ class ComputeManager(manager.SchedulerDependentManager):
old_enough = (not instance.deleted_at or
timeutils.is_older_than(instance.deleted_at,
interval))
- soft_deleted = instance.vm_state == vm_states.SOFT_DELETE
+ soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if soft_deleted and old_enough:
LOG.info(_('Reclaiming deleted instance'), instance=instance)
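
The rewritten _sync_power_states amounts to a small decision table: skip the
instance if a task is pending or it has moved hosts, unconditionally copy the
hypervisor's power_state into the DB, then reconcile vm_state against
power_state by calling the idempotent stop() API rather than writing vm_state
directly. The core decision as a standalone sketch (power-state values are
illustrative stand-ins for nova.compute.power_state):

    # Standalone sketch of the reconciliation decision above.
    NOSTATE, RUNNING, PAUSED, SUSPENDED, SHUTDOWN, CRASHED = range(6)
    DOWNISH = (NOSTATE, SHUTDOWN, CRASHED)

    def reconcile(vm_state, vm_power_state):
        """Corrective action for a stable (vm_state, power_state) pair,
        assuming task_state is None (pending tasks are skipped earlier)."""
        if vm_state == 'active' and \
                vm_power_state in DOWNISH + (PAUSED, SUSPENDED):
            return 'stop'  # shut down/crashed/hung behind Nova's back;
                           # stop() runs all the usual hooks and checks
        if vm_state == 'stopped' and vm_power_state not in DOWNISH:
            return 'stop'  # should be off but is not; stop() is idempotent
        if vm_state in ('soft-delete', 'deleted') and \
                vm_power_state not in (NOSTATE, SHUTDOWN):
            return 'warn'  # left to _cleanup_running_deleted_instances()
        return 'noop'      # BUILDING, RESCUED, RESIZED, etc. are ignored

    assert reconcile('active', CRASHED) == 'stop'
    assert reconcile('stopped', RUNNING) == 'stop'
    assert reconcile('active', RUNNING) == 'noop'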
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index 795213be0..d4df75e60 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -25,36 +25,74 @@ necessary.
"""
+# possible task states during create()
SCHEDULING = 'scheduling'
BLOCK_DEVICE_MAPPING = 'block_device_mapping'
NETWORKING = 'networking'
SPAWNING = 'spawning'
+# possible task states during snapshot()
IMAGE_SNAPSHOT = 'image_snapshot'
+
+# possible task states during backup()
IMAGE_BACKUP = 'image_backup'
+# possible task states during set_admin_password()
UPDATING_PASSWORD = 'updating_password'
+# possible task states during resize()
RESIZE_PREP = 'resize_prep'
RESIZE_MIGRATING = 'resize_migrating'
RESIZE_MIGRATED = 'resize_migrated'
RESIZE_FINISH = 'resize_finish'
+
+# possible task states during revert_resize()
RESIZE_REVERTING = 'resize_reverting'
+
+# possible task states during confirm_resize()
RESIZE_CONFIRMING = 'resize_confirming'
-RESIZE_VERIFY = 'resize_verify'
+# possible task states during reboot()
REBOOTING = 'rebooting'
REBOOTING_HARD = 'rebooting_hard'
+
+# possible task states during pause()
PAUSING = 'pausing'
+
+# possible task states during unpause()
UNPAUSING = 'unpausing'
+
+# possible task states during suspend()
SUSPENDING = 'suspending'
+
+# possible task states during resume()
RESUMING = 'resuming'
+
+# possible task states during stop()
+STOPPING = 'stopping'
+
+# possible task states during start()
+STARTING = 'starting'
+
+# possible task states during soft_delete()
POWERING_OFF = 'powering-off'
+
+# possible task states during restore()
POWERING_ON = 'powering-on'
+# possible task states during rescue()
RESCUING = 'rescuing'
+
+# possible task states during unrescue()
UNRESCUING = 'unrescuing'
+# possible task states during rebuild()
+REBUILDING = 'rebuilding'
+REBUILD_BLOCK_DEVICE_MAPPING = "rebuild_block_device_mapping"
+REBUILD_SPAWNING = 'rebuild_spawning'
+
+# possible task states during live_migrate()
+MIGRATING = "migrating"
+
+# possible task states during delete()
DELETING = 'deleting'
-STOPPING = 'stopping'
-STARTING = 'starting'
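
Grouped this way, the module doubles as documentation of which task states
each API call may set. The same grouping as a plain dict, summarizing the
comments above (listed in file order, which is not a guaranteed runtime
sequence):

    # Summary of the groupings above; file order, not a runtime guarantee.
    TASK_STATES_BY_API = {
        'create': ['scheduling', 'block_device_mapping', 'networking',
                   'spawning'],
        'snapshot': ['image_snapshot'],
        'backup': ['image_backup'],
        'set_admin_password': ['updating_password'],
        'resize': ['resize_prep', 'resize_migrating', 'resize_migrated',
                   'resize_finish'],
        'revert_resize': ['resize_reverting'],
        'confirm_resize': ['resize_confirming'],
        'reboot': ['rebooting', 'rebooting_hard'],
        'pause': ['pausing'],
        'unpause': ['unpausing'],
        'suspend': ['suspending'],
        'resume': ['resuming'],
        'stop': ['stopping'],
        'start': ['starting'],
        'soft_delete': ['powering-off'],
        'restore': ['powering-on'],
        'rescue': ['rescuing'],
        'unrescue': ['unrescuing'],
        'rebuild': ['rebuilding', 'rebuild_block_device_mapping',
                    'rebuild_spawning'],
        'live_migrate': ['migrating'],
        'delete': ['deleting'],
    }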
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 1d0aa6d62..94a566cce 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -18,24 +18,29 @@
"""Possible vm states for instances.
Compute instance vm states represent the state of an instance as it pertains to
-a user or administrator. When combined with task states (task_states.py), a
-better picture can be formed regarding the instance's health.
+a user or administrator.
-"""
+vm_state describes a VM's current stable (not transitional) state. That is,
+if there are no ongoing compute API calls (running tasks), vm_state should
+reflect what the customer expects the VM to be. When combined with task
+states (task_states.py), a better picture can be formed regarding the
+instance's health and progress.
-ACTIVE = 'active'
-BUILDING = 'building'
-REBUILDING = 'rebuilding'
+See http://wiki.openstack.org/VMState
+"""
+ACTIVE = 'active' # VM is running
+BUILDING = 'building' # VM only exists in DB
PAUSED = 'paused'
-SUSPENDED = 'suspended'
-SHUTOFF = 'shutoff'
-RESCUED = 'rescued'
-DELETED = 'deleted'
-STOPPED = 'stopped'
-SOFT_DELETE = 'soft-delete'
-
-MIGRATING = 'migrating'
-RESIZING = 'resizing'
+SUSPENDED = 'suspended' # VM is suspended to disk.
+STOPPED = 'stopped' # VM is powered off, the disk image is still there.
+RESCUED = 'rescued' # A rescue image is running with the original VM image
+# attached.
+RESIZED = 'resized' # a VM with the new size is active. The user is expected
+# to manually confirm or revert.
+
+SOFT_DELETED = 'soft-delete' # VM is marked as deleted but the disk images are
+# still available to restore.
+DELETED = 'deleted' # VM is permanently deleted.
ERROR = 'error'
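
Since vm_state is now restricted to stable states, all transitional detail
lives in task_state, and the two read as a pair. A tiny illustrative helper:

    # Sketch: a (vm_state, task_state) pair now tells the whole story.
    def describe(vm_state, task_state):
        """Combine the stable state with any in-flight task."""
        if task_state is None:
            return 'stable: %s' % vm_state      # what the user expects
        return '%s (busy: %s)' % (vm_state, task_state)

    assert describe('active', None) == 'stable: active'
    assert describe('active', 'rebuilding') == 'active (busy: rebuilding)'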
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index cff4cd8ab..eabd03a22 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -503,8 +503,7 @@ def _get_host_utilization(context, host, ram_mb, disk_gb):
free_ram_mb -= instance.memory_mb
free_disk_gb -= instance.root_gb
free_disk_gb -= instance.ephemeral_gb
- if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING,
- vm_states.MIGRATING, vm_states.RESIZING]:
+ if instance.task_state is not None:
work += 1
return dict(free_ram_mb=free_ram_mb,
free_disk_gb=free_disk_gb,
@@ -1497,12 +1496,12 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir):
# include or exclude both
if filters.pop('deleted'):
deleted = or_(models.Instance.deleted == True,
- models.Instance.vm_state == vm_states.SOFT_DELETE)
+ models.Instance.vm_state == vm_states.SOFT_DELETED)
query_prefix = query_prefix.filter(deleted)
else:
query_prefix = query_prefix.\
filter_by(deleted=False).\
- filter(models.Instance.vm_state != vm_states.SOFT_DELETE)
+ filter(models.Instance.vm_state != vm_states.SOFT_DELETED)
if not context.is_admin:
# If we're not admin context, add appropriate filter..
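
With the rename, the 'deleted' filter treats SOFT_DELETED rows as deleted in
both directions of the query. A plain-Python sketch of the filter semantics
(the real code builds the equivalent SQLAlchemy query with or_() and
filter_by()):

    # Plain-Python sketch of the filter semantics above.
    def matches_deleted_filter(instance, want_deleted):
        is_deleted = (instance['deleted'] or
                      instance['vm_state'] == 'soft-delete')
        return is_deleted if want_deleted else not is_deleted

    rows = [{'deleted': False, 'vm_state': 'active'},
            {'deleted': False, 'vm_state': 'soft-delete'},
            {'deleted': True, 'vm_state': 'deleted'}]
    deleted_view = [r['vm_state'] for r in rows
                    if matches_deleted_filter(r, want_deleted=True)]
    assert deleted_view == ['soft-delete', 'deleted']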
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 9cc528049..41a2f5c19 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -24,7 +24,7 @@ Scheduler base class that all Schedulers should inherit from
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
-from nova.compute import vm_states
+from nova.compute import task_states
from nova import db
from nova import exception
from nova import flags
@@ -228,7 +228,7 @@ class Scheduler(object):
disk_over_commit)
# Changing instance_state.
- values = {"vm_state": vm_states.MIGRATING}
+ values = {"task_state": task_states.MIGRATING}
# update instance state and notify
(old_ref, new_instance_ref) = db.instance_update_and_get_original(
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index a6853a610..6dd6fe916 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -854,7 +854,7 @@ class CloudTestCase(test.TestCase):
test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
power_state.RUNNING, vm_states.ACTIVE)
test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
- power_state.NOSTATE, vm_states.SHUTOFF,
+ power_state.NOSTATE, vm_states.STOPPED,
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index 922e09f0b..3b96b552e 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -57,7 +57,8 @@ def fake_compute_api_raises_invalid_state(*args, **kwargs):
def fake_compute_api_get(self, context, instance_id):
- return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE}
+ return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE,
+ 'task_state': None}
def fake_scheduler_api_live_migration(self, context, block_migration,
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index b82185312..3e3b4eafc 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -19,6 +19,7 @@ import mox
import webob
from nova.api.openstack.compute import servers
+from nova.compute import task_states
from nova.compute import vm_states
import nova.db
from nova import exception
@@ -461,8 +462,8 @@ class ServerActionsControllerTest(test.TestCase):
context = req.environ['nova.context']
update(context, mox.IgnoreArg(),
image_ref=self._image_href,
- vm_state=vm_states.REBUILDING,
- task_state=None, progress=0, **attributes).AndReturn(None)
+ task_state=task_states.REBUILDING,
+ progress=0, **attributes).AndReturn(None)
self.mox.ReplayAll()
self.controller._action_rebuild(req, FAKE_UUID, body)
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index fbc60da68..613747ce5 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -1331,16 +1331,17 @@ class ServersControllerTest(test.TestCase):
self.server_delete_called = False
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.RESIZING))
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_PREP))
- def instance_destroy_mock(context, id):
+ def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
self.stubs.Set(nova.db, 'instance_destroy', instance_destroy_mock)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller.delete,
- req,
- FAKE_UUID)
+ self.controller.delete(req, FAKE_UUID)
+        # Delete should be allowed in any case, even during resizing,
+ # because it may get stuck.
+ self.assertEqual(self.server_delete_called, True)
class ServerStatusTest(test.TestCase):
@@ -1374,7 +1375,8 @@ class ServerStatusTest(test.TestCase):
self.assertEqual(response['server']['status'], 'HARD_REBOOT')
def test_rebuild(self):
- response = self._get_with_state(vm_states.REBUILDING)
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBUILDING)
self.assertEqual(response['server']['status'], 'REBUILD')
def test_rebuild_error(self):
@@ -1382,16 +1384,16 @@ class ServerStatusTest(test.TestCase):
self.assertEqual(response['server']['status'], 'ERROR')
def test_resize(self):
- response = self._get_with_state(vm_states.RESIZING)
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.RESIZE_PREP)
self.assertEqual(response['server']['status'], 'RESIZE')
def test_verify_resize(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.RESIZE_VERIFY)
+ response = self._get_with_state(vm_states.RESIZED, None)
self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
def test_revert_resize(self):
- response = self._get_with_state(vm_states.RESIZING,
+ response = self._get_with_state(vm_states.RESIZED,
task_states.RESIZE_REVERTING)
self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 0a90a754c..c816acc95 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -44,9 +44,9 @@ from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
from nova.openstack.common import policy as common_policy
-from nova.openstack.common import timeutils
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
+from nova.openstack.common import timeutils
import nova.policy
from nova import quota
from nova.scheduler import driver as scheduler_driver
@@ -1518,7 +1518,7 @@ class ComputeTestCase(BaseTestCase):
instance_id = instance['id']
i_ref = db.instance_get(c, instance_id)
db.instance_update(c, i_ref['uuid'],
- {'vm_state': vm_states.MIGRATING,
+ {'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
@@ -1579,7 +1579,7 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(ctxt)
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(power_state.NOSTATE, instances[0]['power_state'])
+ self.assertEqual(task_states.STOPPING, instances[0]['task_state'])
def test_add_instance_fault(self):
exc_info = None
@@ -1819,17 +1819,17 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(call_info['get_nw_info'], 4)
def test_poll_unconfirmed_resizes(self):
- instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.ACTIVE,
- 'task_state': task_states.RESIZE_VERIFY},
+ instances = [{'uuid': 'fake_uuid1', 'vm_state': vm_states.RESIZED,
+ 'task_state': None},
{'uuid': 'noexist'},
{'uuid': 'fake_uuid2', 'vm_state': vm_states.ERROR,
- 'task_state': task_states.RESIZE_VERIFY},
+ 'task_state': None},
{'uuid': 'fake_uuid3', 'vm_state': vm_states.ACTIVE,
'task_state': task_states.REBOOTING},
{'uuid': 'fake_uuid4', 'vm_state': vm_states.ACTIVE,
- 'task_state': task_states.RESIZE_VERIFY},
+ 'task_state': None},
{'uuid': 'fake_uuid5', 'vm_state': vm_states.ACTIVE,
- 'task_state': task_states.RESIZE_VERIFY}]
+ 'task_state': None}]
expected_migration_status = {'fake_uuid1': 'confirmed',
'noexist': 'error',
'fake_uuid2': 'error',
@@ -2261,12 +2261,12 @@ class ComputeAPITestCase(BaseTestCase):
# the instance is shutdown by itself
db.instance_update(self.context, instance['uuid'],
{'power_state': power_state.NOSTATE,
- 'vm_state': vm_states.SHUTOFF})
- check_state(instance['uuid'], power_state.NOSTATE, vm_states.SHUTOFF,
+ 'vm_state': vm_states.STOPPED})
+ check_state(instance['uuid'], power_state.NOSTATE, vm_states.STOPPED,
None)
start_check_state(instance['uuid'], power_state.NOSTATE,
- vm_states.SHUTOFF, task_states.STARTING)
+ vm_states.STOPPED, task_states.STARTING)
db.instance_destroy(self.context, instance['uuid'])
@@ -2344,7 +2344,8 @@ class ComputeAPITestCase(BaseTestCase):
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
- {'vm_state': vm_states.SOFT_DELETE})
+ {'vm_state': vm_states.SOFT_DELETED,
+ 'task_state': None})
self.compute_api.force_delete(self.context, instance)
@@ -2432,7 +2433,8 @@ class ComputeAPITestCase(BaseTestCase):
# set the state that the instance gets when soft_delete finishes
instance = db.instance_update(self.context, instance['uuid'],
- {'vm_state': vm_states.SOFT_DELETE})
+ {'vm_state': vm_states.SOFT_DELETED,
+ 'task_state': None})
self.compute_api.restore(self.context, instance)
@@ -2476,7 +2478,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(info['image_ref'], image_ref)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['vm_state'], vm_states.REBUILDING)
+ self.assertEqual(instance['task_state'], task_states.REBUILDING)
sys_metadata = db.instance_system_metadata_get(self.context,
instance_uuid)
self.assertEqual(sys_metadata,
@@ -2772,8 +2774,8 @@ class ComputeAPITestCase(BaseTestCase):
'status': 'finished'})
# set the state that the instance gets when resize finishes
instance = db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.RESIZE_VERIFY,
- 'vm_state': vm_states.ACTIVE})
+ {'task_state': None,
+ 'vm_state': vm_states.RESIZED})
self.compute_api.confirm_resize(context, instance)
self.compute.terminate_instance(context, instance['uuid'])
@@ -2792,13 +2794,13 @@ class ComputeAPITestCase(BaseTestCase):
'status': 'finished'})
# set the state that the instance gets when resize finishes
instance = db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.RESIZE_VERIFY,
- 'vm_state': vm_states.ACTIVE})
+ {'task_state': None,
+ 'vm_state': vm_states.RESIZED})
self.compute_api.revert_resize(context, instance)
instance = db.instance_get_by_uuid(context, instance['uuid'])
- self.assertEqual(instance['vm_state'], vm_states.RESIZING)
+ self.assertEqual(instance['vm_state'], vm_states.RESIZED)
self.assertEqual(instance['task_state'], task_states.RESIZE_REVERTING)
self.compute.terminate_instance(context, instance['uuid'])
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 9979c5e04..5d1d6c8e9 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -22,6 +22,7 @@ Tests For Scheduler
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
@@ -454,7 +455,7 @@ class SchedulerTestCase(test.TestCase):
self.driver._live_migration_common_check(self.context, instance,
dest, block_migration, disk_over_commit)
db.instance_update_and_get_original(self.context, instance['id'],
- {"vm_state": vm_states.MIGRATING}).AndReturn(
+ {"task_state": task_states.MIGRATING}).AndReturn(
(instance, instance))
driver.cast_to_compute_host(self.context, instance['host'],
@@ -563,7 +564,7 @@ class SchedulerTestCase(test.TestCase):
).AndReturn(True)
db.instance_update_and_get_original(self.context, instance['id'],
- {"vm_state": vm_states.MIGRATING}).AndReturn(
+ {"task_state": task_states.MIGRATING}).AndReturn(
(instance, instance))
driver.cast_to_compute_host(self.context, instance['host'],
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 4b1b5dac4..43ee7ed93 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -26,7 +26,7 @@ import time
from nova import test
-from nova.compute import task_states
+from nova.compute import vm_states
from nova import db
from nova import flags
from nova import log
@@ -150,16 +150,19 @@ class ImageCacheManagerTestCase(test.TestCase):
'host': FLAGS.host,
'name': 'inst-1',
'uuid': '123',
+ 'vm_state': '',
'task_state': ''},
{'image_ref': '2',
'host': FLAGS.host,
'name': 'inst-2',
'uuid': '456',
+ 'vm_state': '',
'task_state': ''},
{'image_ref': '2',
'host': 'remotehost',
'name': 'inst-3',
'uuid': '789',
+ 'vm_state': '',
'task_state': ''}])
image_cache_manager = imagecache.ImageCacheManager()
@@ -183,7 +186,8 @@ class ImageCacheManagerTestCase(test.TestCase):
'host': FLAGS.host,
'name': 'inst-1',
'uuid': '123',
- 'task_state': task_states.RESIZE_VERIFY}])
+ 'vm_state': vm_states.RESIZED,
+ 'task_state': None}])
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_running_instances(None)
@@ -766,11 +770,13 @@ class ImageCacheManagerTestCase(test.TestCase):
'host': FLAGS.host,
'name': 'instance-1',
'uuid': '123',
+ 'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'host': FLAGS.host,
'name': 'instance-2',
'uuid': '456',
+ 'vm_state': '',
'task_state': ''}])
image_cache_manager = imagecache.ImageCacheManager()
@@ -865,11 +871,13 @@ class ImageCacheManagerTestCase(test.TestCase):
'host': FLAGS.host,
'name': 'instance-1',
'uuid': '123',
+ 'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'host': FLAGS.host,
'name': 'instance-2',
'uuid': '456',
+ 'vm_state': '',
'task_state': ''}])
def touch(filename):
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index e8536bb46..9e800e3dd 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -29,6 +29,7 @@ import re
import time
from nova.compute import task_states
+from nova.compute import vm_states
from nova import db
from nova import flags
from nova import log as logging
@@ -140,11 +141,9 @@ class ImageCacheManager(object):
resize_states = [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
- task_states.RESIZE_FINISH,
- task_states.RESIZE_REVERTING,
- task_states.RESIZE_CONFIRMING,
- task_states.RESIZE_VERIFY]
- if instance['task_state'] in resize_states:
+ task_states.RESIZE_FINISH]
+        if instance['task_state'] in resize_states or \
+           instance['vm_state'] == vm_states.RESIZED:
self.instance_names.add(instance['name'] + '_resize')
image_ref_str = str(instance['image_ref'])
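
Because a resize keeps the original disk under '<name>_resize', the cache
manager must keep tracking that name while the instance is mid-resize (the
four RESIZE_* task states) or awaiting confirmation (vm_state RESIZED). A
small sketch of the rule:

    # Sketch of the name-tracking rule above.
    RESIZE_TASK_STATES = ('resize_prep', 'resize_migrating',
                          'resize_migrated', 'resize_finish')

    def names_to_keep(instance):
        """Disk names the image cache must not treat as stale."""
        names = {instance['name']}
        if (instance['task_state'] in RESIZE_TASK_STATES or
                instance['vm_state'] == 'resized'):
            names.add(instance['name'] + '_resize')
        return names

    assert names_to_keep({'name': 'inst-1', 'task_state': None,
                          'vm_state': 'resized'}) == {'inst-1',
                                                      'inst-1_resize'}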
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 387a6b8a5..7fe8a7b77 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -21,6 +21,7 @@ Management class for host-related functions (start, reboot, etc).
import logging
+from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
@@ -80,7 +81,7 @@ class Host(object):
ctxt,
instance.id,
{'host': dest,
- 'vm_state': vm_states.MIGRATING})
+ 'task_state': task_states.MIGRATING})
notifications.send_update(ctxt, old_ref, new_ref)
self._session.call_xenapi('VM.pool_migrate',