author    Brian Lamar <brian.lamar@rackspace.com>  2011-08-31 22:55:34 +0000
committer Tarmac <>  2011-08-31 22:55:34 +0000
commit    3fa7bf7cc4dece3662cf31e8fb0b6daa7df54f8b (patch)
tree      2f8e720bac08d3d98ffea5009a4fb8d718cfe557
parent    32f639aa6a6d017b234987757d7db1a3600009d8 (diff)
parent    c9a6681f484f38778987fbbaa352d07bd8f747c3 (diff)
Fixed and improved the way instance "states" are set. Instead of relying solely on the power_state of a VM, there are now explicitly defined VM states and VM task states, which respectively define the current state of the VM and the task currently being performed by the VM.
-rw-r--r--  nova/api/ec2/cloud.py                                                      50
-rw-r--r--  nova/api/openstack/common.py                                               78
-rw-r--r--  nova/api/openstack/servers.py                                              22
-rw-r--r--  nova/api/openstack/views/servers.py                                        13
-rw-r--r--  nova/compute/api.py                                                       118
-rw-r--r--  nova/compute/manager.py                                                   453
-rw-r--r--  nova/compute/task_states.py                                                59
-rw-r--r--  nova/compute/vm_states.py                                                  39
-rw-r--r--  nova/db/sqlalchemy/api.py                                                  20
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py    138
-rw-r--r--  nova/db/sqlalchemy/models.py                                               16
-rw-r--r--  nova/exception.py                                                           2
-rw-r--r--  nova/scheduler/driver.py                                                   10
-rw-r--r--  nova/tests/api/openstack/test_server_actions.py                            47
-rw-r--r--  nova/tests/api/openstack/test_servers.py                                  101
-rw-r--r--  nova/tests/integrated/test_servers.py                                      34
-rw-r--r--  nova/tests/scheduler/test_scheduler.py                                     13
-rw-r--r--  nova/tests/test_cloud.py                                                   15
-rw-r--r--  nova/tests/test_compute.py                                                 38
-rw-r--r--  nova/tests/vmwareapi/db_fakes.py                                            5
20 files changed, 830 insertions, 441 deletions
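
Before the per-file diffs, a reviewer's sketch (not part of the patch) of the core change: the old state/state_description pair is split into a hypervisor-reported power_state, a user-facing vm_state, and a transient task_state. Field names mirror the patch; the concrete values are illustrative.

# Pre-patch row: one string had to encode both what the VM is and what
# is being done to it.
old_row = {'state': 1, 'state_description': 'stopping'}

# Post-patch row: three orthogonal fields (cf. migration 044 below,
# which translates 'stopping' to exactly this vm_state/task_state pair).
new_row = {
    'power_state': 1,          # raw hypervisor state (integer code)
    'vm_state': 'active',      # stable, user-facing state
    'task_state': 'stopping',  # in-flight work; None when idle
}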
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9aebf92e3..fe44191c8 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -47,6 +47,7 @@ from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
+from nova.compute import vm_states
from nova.image import s3
@@ -78,6 +79,30 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
+# EC2 API can return the following values as documented in the EC2 API
+# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
+# ApiReference-ItemType-InstanceStateType.html
+# pending | running | shutting-down | terminated | stopping | stopped
+_STATE_DESCRIPTION_MAP = {
+ None: 'pending',
+ vm_states.ACTIVE: 'running',
+ vm_states.BUILDING: 'pending',
+ vm_states.REBUILDING: 'pending',
+ vm_states.DELETED: 'terminated',
+ vm_states.STOPPED: 'stopped',
+ vm_states.MIGRATING: 'migrate',
+ vm_states.RESIZING: 'resize',
+ vm_states.PAUSED: 'pause',
+ vm_states.SUSPENDED: 'suspend',
+ vm_states.RESCUED: 'rescue',
+}
+
+
+def state_description_from_vm_state(vm_state):
+ """Map the vm state to the server status string"""
+ return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
+
+
# TODO(yamahata): hypervisor dependent default device name
_DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
@@ -1039,11 +1064,12 @@ class CloudController(object):
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
- state_description = instance['state_description']
- state_to_value = {'stopping': 'stop',
- 'stopped': 'stop',
- 'terminating': 'terminate'}
- value = state_to_value.get(state_description)
+ vm_state = instance['vm_state']
+ state_to_value = {
+ vm_states.STOPPED: 'stopped',
+ vm_states.DELETED: 'terminated',
+ }
+ value = state_to_value.get(vm_state)
if value:
result['instanceInitiatedShutdownBehavior'] = value
@@ -1198,8 +1224,8 @@ class CloudController(object):
self._format_kernel_id(instance, i, 'kernelId')
self._format_ramdisk_id(instance, i, 'ramdiskId')
i['instanceState'] = {
- 'code': instance['state'],
- 'name': instance['state_description']}
+ 'code': instance['power_state'],
+ 'name': state_description_from_vm_state(instance['vm_state'])}
fixed_addr = None
floating_addr = None
if instance['fixed_ips']:
@@ -1618,22 +1644,22 @@ class CloudController(object):
# stop the instance if necessary
restart_instance = False
if not no_reboot:
- state_description = instance['state_description']
+ vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
- if state_description not in ('running', 'stopping', 'stopped'):
+ if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
- if state_description == 'running':
+ if vm_state == vm_states.ACTIVE:
restart_instance = True
self.compute_api.stop(context, instance_id=instance_id)
# wait instance for really stopped
start_time = time.time()
- while state_description != 'stopped':
+ while vm_state != vm_states.STOPPED:
time.sleep(1)
instance = self.compute_api.get(context, instance_id)
- state_description = instance['state_description']
+ vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
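
The EC2 mapping above in runnable form; the string constants stand in for nova.compute.vm_states so the sketch runs without a nova checkout, and the map is trimmed to a few entries.

# Stand-ins mirroring nova/compute/vm_states.py from this commit.
ACTIVE, BUILDING, DELETED, STOPPED = 'active', 'building', 'deleted', 'stopped'

_STATE_DESCRIPTION_MAP = {
    None: 'pending',        # instance has no vm_state recorded yet
    ACTIVE: 'running',
    BUILDING: 'pending',
    DELETED: 'terminated',
    STOPPED: 'stopped',
}

def state_description_from_vm_state(vm_state):
    # Unmapped vm_states fall through unchanged, exactly as in the patch.
    return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)

assert state_description_from_vm_state(ACTIVE) == 'running'
assert state_description_from_vm_state(None) == 'pending'
assert state_description_from_vm_state('error') == 'error'  # fall-through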
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index d9eb832f2..d743a66ef 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -27,7 +27,8 @@ from nova import flags
from nova import log as logging
from nova import quota
from nova.api.openstack import wsgi
-from nova.compute import power_state as compute_power_state
+from nova.compute import vm_states
+from nova.compute import task_states
LOG = logging.getLogger('nova.api.openstack.common')
@@ -38,36 +39,61 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
-_STATUS_MAP = {
- None: 'BUILD',
- compute_power_state.NOSTATE: 'BUILD',
- compute_power_state.RUNNING: 'ACTIVE',
- compute_power_state.BLOCKED: 'ACTIVE',
- compute_power_state.SUSPENDED: 'SUSPENDED',
- compute_power_state.PAUSED: 'PAUSED',
- compute_power_state.SHUTDOWN: 'SHUTDOWN',
- compute_power_state.SHUTOFF: 'SHUTOFF',
- compute_power_state.CRASHED: 'ERROR',
- compute_power_state.FAILED: 'ERROR',
- compute_power_state.BUILDING: 'BUILD',
+_STATE_MAP = {
+ vm_states.ACTIVE: {
+ 'default': 'ACTIVE',
+ task_states.REBOOTING: 'REBOOT',
+ task_states.UPDATING_PASSWORD: 'PASSWORD',
+ task_states.RESIZE_VERIFY: 'VERIFY_RESIZE',
+ },
+ vm_states.BUILDING: {
+ 'default': 'BUILD',
+ },
+ vm_states.REBUILDING: {
+ 'default': 'REBUILD',
+ },
+ vm_states.STOPPED: {
+ 'default': 'STOPPED',
+ },
+ vm_states.MIGRATING: {
+ 'default': 'MIGRATING',
+ },
+ vm_states.RESIZING: {
+ 'default': 'RESIZE',
+ },
+ vm_states.PAUSED: {
+ 'default': 'PAUSED',
+ },
+ vm_states.SUSPENDED: {
+ 'default': 'SUSPENDED',
+ },
+ vm_states.RESCUED: {
+ 'default': 'RESCUE',
+ },
+ vm_states.ERROR: {
+ 'default': 'ERROR',
+ },
+ vm_states.DELETED: {
+ 'default': 'DELETED',
+ },
}
-def status_from_power_state(power_state):
- """Map the power state to the server status string"""
- return _STATUS_MAP[power_state]
+def status_from_state(vm_state, task_state='default'):
+ """Given vm_state and task_state, return a status string."""
+ task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE'))
+ status = task_map.get(task_state, task_map['default'])
+ LOG.debug("Generated %(status)s from vm_state=%(vm_state)s "
+ "task_state=%(task_state)s." % locals())
+ return status
-def power_states_from_status(status):
- """Map the server status string to a list of power states"""
- power_states = []
- for power_state, status_map in _STATUS_MAP.iteritems():
- # Skip the 'None' state
- if power_state is None:
- continue
- if status.lower() == status_map.lower():
- power_states.append(power_state)
- return power_states
+def vm_state_from_status(status):
+ """Map the server status string to a vm state."""
+ for state, task_map in _STATE_MAP.iteritems():
+ status_string = task_map.get("default")
+ if status.lower() == status_string.lower():
+ return state
def get_pagination_params(request):
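
A condensed, runnable restatement of the two helpers above, with _STATE_MAP trimmed to two vm_states (dict.items() here where the Python 2 original uses iteritems()):

_STATE_MAP = {
    'active': {'default': 'ACTIVE', 'rebooting': 'REBOOT'},
    'building': {'default': 'BUILD'},
}

def status_from_state(vm_state, task_state='default'):
    # Unknown vm_states degrade to UNKNOWN_STATE instead of raising,
    # unlike the old _STATUS_MAP[power_state] lookup.
    task_map = _STATE_MAP.get(vm_state, {'default': 'UNKNOWN_STATE'})
    return task_map.get(task_state, task_map['default'])

def vm_state_from_status(status):
    # Reverse lookup against each vm_state's *default* status only.
    for state, task_map in _STATE_MAP.items():
        if status.lower() == task_map['default'].lower():
            return state

assert status_from_state('active') == 'ACTIVE'
assert status_from_state('active', 'rebooting') == 'REBOOT'
assert status_from_state('active', 'spawning') == 'ACTIVE'  # no override
assert status_from_state('bogus') == 'UNKNOWN_STATE'
assert vm_state_from_status('BUILD') == 'building'
assert vm_state_from_status('REBOOT') is None  # task statuses not searched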
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 27c67e79e..e0dd9bdb1 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -95,17 +95,15 @@ class Controller(object):
search_opts['recurse_zones'] = utils.bool_from_str(
search_opts.get('recurse_zones', False))
- # If search by 'status', we need to convert it to 'state'
- # If the status is unknown, bail.
- # Leave 'state' in search_opts so compute can pass it on to
- # child zones..
+ # If search by 'status', we need to convert it to 'vm_state'
+ # to pass on to child zones.
if 'status' in search_opts:
status = search_opts['status']
- search_opts['state'] = common.power_states_from_status(status)
- if len(search_opts['state']) == 0:
+ state = common.vm_state_from_status(status)
+ if state is None:
reason = _('Invalid server status: %(status)s') % locals()
- LOG.error(reason)
raise exception.InvalidInput(reason=reason)
+ search_opts['vm_state'] = state
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
@@ -608,9 +606,8 @@ class ControllerV10(Controller):
try:
self.compute_api.rebuild(context, instance_id, image_id, password)
- except exception.BuildInProgress:
- msg = _("Instance %s is currently being rebuilt.") % instance_id
- LOG.debug(msg)
+ except exception.RebuildRequiresActiveInstance:
+ msg = _("Instance %s must be active to rebuild.") % instance_id
raise exc.HTTPConflict(explanation=msg)
return webob.Response(status_int=202)
@@ -750,9 +747,8 @@ class ControllerV11(Controller):
self.compute_api.rebuild(context, instance_id, image_href,
password, name=name, metadata=metadata,
files_to_inject=personalities)
- except exception.BuildInProgress:
- msg = _("Instance %s is currently being rebuilt.") % instance_id
- LOG.debug(msg)
+ except exception.RebuildRequiresActiveInstance:
+ msg = _("Instance %s must be active to rebuild.") % instance_id
raise exc.HTTPConflict(explanation=msg)
except exception.InstanceNotFound:
msg = _("Instance %s could not be found") % instance_id
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 0ec98591e..b0daeb7a8 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -21,13 +21,12 @@ import hashlib
import os
from nova import exception
-import nova.compute
-import nova.context
from nova.api.openstack import common
from nova.api.openstack.views import addresses as addresses_view
from nova.api.openstack.views import flavors as flavors_view
from nova.api.openstack.views import images as images_view
from nova import utils
+from nova.compute import vm_states
class ViewBuilder(object):
@@ -61,17 +60,13 @@ class ViewBuilder(object):
def _build_detail(self, inst):
"""Returns a detailed model of a server."""
+ vm_state = inst.get('vm_state', vm_states.BUILDING)
+ task_state = inst.get('task_state')
inst_dict = {
'id': inst['id'],
'name': inst['display_name'],
- 'status': common.status_from_power_state(inst.get('state'))}
-
- ctxt = nova.context.get_admin_context()
- compute_api = nova.compute.API()
-
- if compute_api.has_finished_migration(ctxt, inst['uuid']):
- inst_dict['status'] = 'RESIZE-CONFIRM'
+ 'status': common.status_from_state(vm_state, task_state)}
# Return the metadata as a dictionary
metadata = {}
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 3b4bde8ea..595622ba1 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -37,6 +37,8 @@ from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -75,12 +77,18 @@ def generate_default_hostname(instance):
def _is_able_to_shutdown(instance, instance_id):
- states = {'terminating': "Instance %s is already being terminated",
- 'migrating': "Instance %s is being migrated",
- 'stopping': "Instance %s is being stopped"}
- msg = states.get(instance['state_description'])
- if msg:
- LOG.warning(_(msg), instance_id)
+ vm_state = instance["vm_state"]
+ task_state = instance["task_state"]
+
+ valid_shutdown_states = [
+ vm_states.ACTIVE,
+ vm_states.REBUILDING,
+ vm_states.BUILDING,
+ ]
+
+ if vm_state not in valid_shutdown_states:
+ LOG.warn(_("Instance %(instance_id)s is not in an 'active' state. It "
+ "is currently %(vm_state)s. Shutdown aborted.") % locals())
return False
return True
@@ -251,10 +259,10 @@ class API(base.Base):
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
+ 'power_state': power_state.NOSTATE,
+ 'vm_state': vm_states.BUILDING,
'config_drive_id': config_drive_id or '',
'config_drive': config_drive or '',
- 'state': 0,
- 'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
@@ -415,6 +423,8 @@ class API(base.Base):
updates['display_name'] = "Server %s" % instance_id
instance['display_name'] = updates['display_name']
updates['hostname'] = self.hostname_factory(instance)
+ updates['vm_state'] = vm_states.BUILDING
+ updates['task_state'] = task_states.SCHEDULING
instance = self.update(context, instance_id, **updates)
return instance
@@ -750,10 +760,8 @@ class API(base.Base):
return
self.update(context,
- instance['id'],
- state_description='terminating',
- state=0,
- terminated_at=utils.utcnow())
+ instance_id,
+ task_state=task_states.DELETING)
host = instance['host']
if host:
@@ -773,9 +781,9 @@ class API(base.Base):
return
self.update(context,
- instance['id'],
- state_description='stopping',
- state=power_state.NOSTATE,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.STOPPING,
terminated_at=utils.utcnow())
host = instance['host']
@@ -787,12 +795,18 @@ class API(base.Base):
"""Start an instance."""
LOG.debug(_("Going to try to start %s"), instance_id)
instance = self._get_instance(context, instance_id, 'starting')
- if instance['state_description'] != 'stopped':
- _state_description = instance['state_description']
+ vm_state = instance["vm_state"]
+
+ if vm_state != vm_states.STOPPED:
LOG.warning(_("Instance %(instance_id)s is not "
- "stopped(%(_state_description)s)") % locals())
+ "stopped. (%(vm_state)s)") % locals())
return
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.STOPPED,
+ task_state=task_states.STARTING)
+
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
@@ -1020,6 +1034,10 @@ class API(base.Base):
@scheduler_api.reroute_compute("reboot")
def reboot(self, context, instance_id):
"""Reboot the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING)
self._cast_compute_message('reboot_instance', context, instance_id)
@scheduler_api.reroute_compute("rebuild")
@@ -1027,21 +1045,25 @@ class API(base.Base):
name=None, metadata=None, files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
instance = db.api.instance_get(context, instance_id)
+ name = name or instance["display_name"]
- if instance["state"] == power_state.BUILDING:
- msg = _("Instance already building")
- raise exception.BuildInProgress(msg)
+ if instance["vm_state"] != vm_states.ACTIVE:
+ msg = _("Instance must be active to rebuild.")
+ raise exception.RebuildRequiresActiveInstance(msg)
files_to_inject = files_to_inject or []
+ metadata = metadata or {}
+
self._check_injected_file_quota(context, files_to_inject)
+ self._check_metadata_properties_quota(context, metadata)
- values = {"image_ref": image_href}
- if metadata is not None:
- self._check_metadata_properties_quota(context, metadata)
- values['metadata'] = metadata
- if name is not None:
- values['display_name'] = name
- self.db.instance_update(context, instance_id, values)
+ self.update(context,
+ instance_id,
+ metadata=metadata,
+ display_name=name,
+ image_ref=image_href,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBUILDING)
rebuild_params = {
"new_pass": admin_password,
@@ -1065,6 +1087,11 @@ class API(base.Base):
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('revert_resize', context,
instance_ref['uuid'],
@@ -1085,6 +1112,12 @@ class API(base.Base):
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
+
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('confirm_resize', context,
instance_ref['uuid'],
@@ -1130,6 +1163,11 @@ class API(base.Base):
if (current_memory_mb == new_memory_mb) and flavor_id:
raise exception.CannotResizeToSameSize()
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.RESIZING,
+ task_state=task_states.RESIZE_PREP)
+
instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
{"method": "prep_resize",
@@ -1163,11 +1201,19 @@ class API(base.Base):
@scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
"""Pause the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.PAUSING)
self._cast_compute_message('pause_instance', context, instance_id)
@scheduler_api.reroute_compute("unpause")
def unpause(self, context, instance_id):
"""Unpause the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.PAUSED,
+ task_state=task_states.UNPAUSING)
self._cast_compute_message('unpause_instance', context, instance_id)
def _call_compute_message_for_host(self, action, context, host, params):
@@ -1200,21 +1246,37 @@ class API(base.Base):
@scheduler_api.reroute_compute("suspend")
def suspend(self, context, instance_id):
"""Suspend the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.SUSPENDING)
self._cast_compute_message('suspend_instance', context, instance_id)
@scheduler_api.reroute_compute("resume")
def resume(self, context, instance_id):
"""Resume the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.SUSPENDED,
+ task_state=task_states.RESUMING)
self._cast_compute_message('resume_instance', context, instance_id)
@scheduler_api.reroute_compute("rescue")
def rescue(self, context, instance_id):
"""Rescue the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESCUING)
self._cast_compute_message('rescue_instance', context, instance_id)
@scheduler_api.reroute_compute("unrescue")
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.RESCUED,
+ task_state=task_states.UNRESCUING)
self._cast_compute_message('unrescue_instance', context, instance_id)
@scheduler_api.reroute_compute("set_admin_password")
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6fcb3786c..0477db745 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -56,6 +56,8 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova.notifier import api as notifier
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -146,6 +148,10 @@ class ComputeManager(manager.SchedulerDependentManager):
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
+ def _instance_update(self, context, instance_id, **kwargs):
+ """Update an instance in the database using kwargs as value."""
+ return self.db.instance_update(context, instance_id, kwargs)
+
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
@@ -153,8 +159,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instances = self.db.instance_get_all_by_host(context, self.host)
for instance in instances:
inst_name = instance['name']
- db_state = instance['state']
- drv_state = self._update_state(context, instance['id'])
+ db_state = instance['power_state']
+ drv_state = self._get_power_state(context, instance)
expect_running = db_state == power_state.RUNNING \
and drv_state != db_state
@@ -177,29 +183,13 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warning(_('Hypervisor driver does not '
'support firewall rules'))
- def _update_state(self, context, instance_id, state=None):
- """Update the state of an instance from the driver info."""
- instance_ref = self.db.instance_get(context, instance_id)
-
- if state is None:
- try:
- LOG.debug(_('Checking state of %s'), instance_ref['name'])
- info = self.driver.get_info(instance_ref['name'])
- except exception.NotFound:
- info = None
-
- if info is not None:
- state = info['state']
- else:
- state = power_state.FAILED
-
- self.db.instance_set_state(context, instance_id, state)
- return state
-
- def _update_launched_at(self, context, instance_id, launched_at=None):
- """Update the launched_at parameter of the given instance."""
- data = {'launched_at': launched_at or utils.utcnow()}
- self.db.instance_update(context, instance_id, data)
+ def _get_power_state(self, context, instance):
+ """Retrieve the power state for the given instance."""
+ LOG.debug(_('Checking state of %s'), instance['name'])
+ try:
+ return self.driver.get_info(instance['name'])["state"]
+ except exception.NotFound:
+ return power_state.FAILED
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
@@ -251,11 +241,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'block_device_mapping')
-
volume_api = volume.API()
block_device_mapping = []
swap = None
@@ -389,17 +374,12 @@ class ComputeManager(manager.SchedulerDependentManager):
updates = {}
updates['host'] = self.host
updates['launched_on'] = self.host
- instance = self.db.instance_update(context,
- instance_id,
- updates)
+ updates['vm_state'] = vm_states.BUILDING
+ updates['task_state'] = task_states.NETWORKING
+ instance = self.db.instance_update(context, instance_id, updates)
instance['injected_files'] = kwargs.get('injected_files', [])
instance['admin_pass'] = kwargs.get('admin_password', None)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'networking')
-
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# NOTE(vish): This could be a cast because we don't do anything
@@ -418,6 +398,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# all vif creation and network injection, maybe this is correct
network_info = []
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+
(swap, ephemerals,
block_device_mapping) = self._setup_block_device_mapping(
context, instance_id)
@@ -427,9 +412,12 @@ class ComputeManager(manager.SchedulerDependentManager):
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
- # TODO(vish) check to make sure the availability zone matches
- self._update_state(context, instance_id, power_state.BUILDING)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.SPAWNING)
+ # TODO(vish) check to make sure the availability zone matches
try:
self.driver.spawn(context, instance,
network_info, block_device_info)
@@ -438,13 +426,21 @@ class ComputeManager(manager.SchedulerDependentManager):
"virtualization enabled in the BIOS? Details: "
"%(ex)s") % locals()
LOG.exception(msg)
+ return
+
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.create',
notifier.INFO, usage_info)
+
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -480,8 +476,7 @@ class ComputeManager(manager.SchedulerDependentManager):
for volume in volumes:
self._detach_volume(context, instance_id, volume['id'], False)
- if (instance['state'] == power_state.SHUTOFF and
- instance['state_description'] != 'stopped'):
+ if instance['power_state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
@@ -496,9 +491,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance = self.db.instance_get(context.elevated(), instance_id)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.DELETED,
+ task_state=None,
+ terminated_at=utils.utcnow())
- # TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
+
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.delete',
@@ -509,7 +509,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
- # instance state will be updated to stopped by _poll_instance_states()
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.STOPPED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -529,26 +532,46 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Rebuilding instance %s"), instance_id, context=context)
- self._update_state(context, instance_id, power_state.BUILDING)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.REBUILDING,
+ task_state=None)
network_info = self._get_instance_nw_info(context, instance_ref)
-
self.driver.destroy(instance_ref, network_info)
+
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.REBUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+
instance_ref.injected_files = kwargs.get('injected_files', [])
network_info = self.network_api.get_instance_nw_info(context,
instance_ref)
bd_mapping = self._setup_block_device_mapping(context, instance_id)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.REBUILDING,
+ task_state=task_states.SPAWNING)
+
# pull in new password here since the original password isn't in the db
instance_ref.admin_pass = kwargs.get('new_pass',
utils.generate_password(FLAGS.password_length))
self.driver.spawn(context, instance_ref, network_info, bd_mapping)
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
- usage_info = utils.usage_from_instance(instance_ref)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
+ usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier.INFO,
@@ -558,26 +581,34 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
+ LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
context = context.elevated()
- self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
- if instance_ref['state'] != power_state.RUNNING:
- state = instance_ref['state']
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING)
+
+ if instance_ref['power_state'] != power_state.RUNNING:
+ state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals(),
context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rebooting')
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.reboot(instance_ref, network_info)
- self._update_state(context, instance_id)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def snapshot_instance(self, context, instance_id, image_id,
@@ -593,37 +624,45 @@ class ComputeManager(manager.SchedulerDependentManager):
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
+ if image_type == "snapshot":
+ task_state = task_states.IMAGE_SNAPSHOT
+ elif image_type == "backup":
+ task_state = task_states.IMAGE_BACKUP
+ else:
+ raise Exception(_('Image type not recognized %s') % image_type)
+
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- #NOTE(sirp): update_state currently only refreshes the state field
- # if we add is_snapshotting, we will need this refreshed too,
- # potentially?
- self._update_state(context, instance_id)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_state)
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
- if instance_ref['state'] != power_state.RUNNING:
- state = instance_ref['state']
+
+ if instance_ref['power_state'] != power_state.RUNNING:
+ state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals())
self.driver.snapshot(context, instance_ref, image_id)
+ self._instance_update(context, instance_id, task_state=None)
+
+ if image_type == 'snapshot' and rotation:
+ raise exception.ImageRotationNotAllowed()
+
+ elif image_type == 'backup' and rotation:
+ instance_uuid = instance_ref['uuid']
+ self.rotate_backups(context, instance_uuid, backup_type, rotation)
- if image_type == 'snapshot':
- if rotation:
- raise exception.ImageRotationNotAllowed()
elif image_type == 'backup':
- if rotation:
- instance_uuid = instance_ref['uuid']
- self.rotate_backups(context, instance_uuid, backup_type,
- rotation)
- else:
- raise exception.RotationRequiredForBackup()
- else:
- raise Exception(_('Image type not recognized %s') % image_type)
+ raise exception.RotationRequiredForBackup()
def rotate_backups(self, context, instance_uuid, backup_type, rotation):
"""Delete excess backups associated to an instance.
@@ -691,7 +730,7 @@ class ComputeManager(manager.SchedulerDependentManager):
for i in xrange(max_tries):
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref["id"]
- instance_state = instance_ref["state"]
+ instance_state = instance_ref["power_state"]
expected_state = power_state.RUNNING
if instance_state != expected_state:
@@ -726,7 +765,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
- instance_state = instance_ref['state']
+ instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to inject a file into a non-running '
@@ -744,7 +783,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
- instance_state = instance_ref['state']
+ instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to update agent on a non-running '
@@ -759,40 +798,41 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
+ LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
- _update_state = lambda result: self._update_state_callback(
- self, context, instance_id, result)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.rescue(context, instance_ref, _update_state, network_info)
- self._update_state(context, instance_id)
+
+ # NOTE(blamar): None of the virt drivers use the 'callback' param
+ self.driver.rescue(context, instance_ref, None, network_info)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.RESCUED,
+ task_state=None,
+ power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
+ LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- _update_state = lambda result: self._update_state_callback(
- self, context, instance_id, result)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.unrescue(instance_ref, _update_state, network_info)
- self._update_state(context, instance_id)
- @staticmethod
- def _update_state_callback(self, context, instance_id, result):
- """Update instance state when async task completes."""
- self._update_state(context, instance_id)
+ # NOTE(blamar): None of the virt drivers use the 'callback' param
+ self.driver.unrescue(instance_ref, None, network_info)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -851,11 +891,12 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self.db.instance_update(context, instance_ref['uuid'],
- dict(memory_mb=instance_type['memory_mb'],
- vcpus=instance_type['vcpus'],
- local_gb=instance_type['local_gb'],
- instance_type_id=instance_type['id']))
+ self._instance_update(context,
+ instance_ref["uuid"],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb'],
+ instance_type_id=instance_type['id'])
self.driver.revert_migration(instance_ref)
self.db.migration_update(context, migration_id,
@@ -882,8 +923,11 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get_by_uuid(context, instance_id)
if instance_ref['host'] == FLAGS.host:
- raise exception.Error(_(
- 'Migration error: destination same as source!'))
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ERROR)
+ msg = _('Migration error: destination same as source!')
+ raise exception.Error(msg)
old_instance_type = self.db.instance_type_get(context,
instance_ref['instance_type_id'])
@@ -977,6 +1021,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.finish_migration(context, instance_ref, disk_info,
network_info, resize_instance)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_VERIFY)
+
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@@ -1008,35 +1057,35 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
+ LOG.audit(_('instance %s: pausing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: pausing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'pausing')
- self.driver.pause(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.pause(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.PAUSED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this host."""
+ LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unpausing')
- self.driver.unpause(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.unpause(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_power_action(self, context, host=None, action=None):
@@ -1052,7 +1101,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
- if instance_ref["state"] == power_state.RUNNING:
+ if instance_ref["power_state"] == power_state.RUNNING:
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
return self.driver.get_diagnostics(instance_ref)
@@ -1061,33 +1110,35 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def suspend_instance(self, context, instance_id):
"""Suspend the given instance."""
+ LOG.audit(_('instance %s: suspending'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: suspending'), instance_id, context=context)
- self.db.instance_set_state(context, instance_id,
- power_state.NOSTATE,
- 'suspending')
- self.driver.suspend(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.suspend(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.SUSPENDED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def resume_instance(self, context, instance_id):
"""Resume the given suspended instance."""
+ LOG.audit(_('instance %s: resuming'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: resuming'), instance_id, context=context)
- self.db.instance_set_state(context, instance_id,
- power_state.NOSTATE,
- 'resuming')
- self.driver.resume(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.resume(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def lock_instance(self, context, instance_id):
@@ -1498,11 +1549,14 @@ class ComputeManager(manager.SchedulerDependentManager):
'block_migration': block_migration}})
# Restore instance state
- self.db.instance_update(ctxt,
- instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': dest})
+ current_power_state = self._get_power_state(ctxt, instance_ref)
+ self._instance_update(ctxt,
+ instance_ref["id"],
+ host=dest,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
# Restore volume state
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
@@ -1548,11 +1602,11 @@ class ComputeManager(manager.SchedulerDependentManager):
This param specifies destination host.
"""
host = instance_ref['host']
- self.db.instance_update(context,
- instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': host})
+ self._instance_update(context,
+ instance_ref['id'],
+ host=host,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
@@ -1600,10 +1654,9 @@ class ComputeManager(manager.SchedulerDependentManager):
error_list.append(ex)
try:
- self._poll_instance_states(context)
+ self._sync_power_states(context)
except Exception as ex:
- LOG.warning(_("Error during instance poll: %s"),
- unicode(ex))
+ LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
error_list.append(ex)
return error_list
@@ -1618,68 +1671,40 @@ class ComputeManager(manager.SchedulerDependentManager):
self.update_service_capabilities(
self.driver.get_host_stats(refresh=True))
- def _poll_instance_states(self, context):
+ def _sync_power_states(self, context):
+ """Align power states between the database and the hypervisor.
+
+ The hypervisor is authoritative for the power_state data, so we
+ simply loop over all known instances for this host and update the
+ power_state according to the hypervisor. If the instance is not found
+ then it will be set to power_state.NOSTATE, because it doesn't exist
+ on the hypervisor.
+
+ """
vm_instances = self.driver.list_instances_detail()
vm_instances = dict((vm.name, vm) for vm in vm_instances)
+ db_instances = self.db.instance_get_all_by_host(context, self.host)
- # Keep a list of VMs not in the DB, cross them off as we find them
- vms_not_found_in_db = list(vm_instances.keys())
+ num_vm_instances = len(vm_instances)
+ num_db_instances = len(db_instances)
- db_instances = self.db.instance_get_all_by_host(context, self.host)
+ if num_vm_instances != num_db_instances:
+ LOG.info(_("Found %(num_db_instances)s in the database and "
+ "%(num_vm_instances)s on the hypervisor.") % locals())
for db_instance in db_instances:
- name = db_instance['name']
- db_state = db_instance['state']
+ name = db_instance["name"]
+ db_power_state = db_instance['power_state']
vm_instance = vm_instances.get(name)
if vm_instance is None:
- # NOTE(justinsb): We have to be very careful here, because a
- # concurrent operation could be in progress (e.g. a spawn)
- if db_state == power_state.BUILDING:
- # TODO(justinsb): This does mean that if we crash during a
- # spawn, the machine will never leave the spawning state,
- # but this is just the way nova is; this function isn't
- # trying to correct that problem.
- # We could have a separate task to correct this error.
- # TODO(justinsb): What happens during a live migration?
- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
- "State=%(db_state)s, so assuming spawn is in "
- "progress.") % locals())
- vm_state = db_state
- else:
- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
- "State=%(db_state)s, so setting state to "
- "shutoff.") % locals())
- vm_state = power_state.SHUTOFF
- if db_instance['state_description'] == 'stopping':
- self.db.instance_stop(context, db_instance['id'])
- continue
+ vm_power_state = power_state.NOSTATE
else:
- vm_state = vm_instance.state
- vms_not_found_in_db.remove(name)
-
- if (db_instance['state_description'] in ['migrating', 'stopping']):
- # A situation which db record exists, but no instance"
- # sometimes occurs while live-migration at src compute,
- # this case should be ignored.
- LOG.debug(_("Ignoring %(name)s, as it's currently being "
- "migrated.") % locals())
- continue
-
- if vm_state != db_state:
- LOG.info(_("DB/VM state mismatch. Changing state from "
- "'%(db_state)s' to '%(vm_state)s'") % locals())
- self._update_state(context, db_instance['id'], vm_state)
+ vm_power_state = vm_instance.state
- # NOTE(justinsb): We no longer auto-remove SHUTOFF instances
- # It's quite hard to get them back when we do.
-
- # Are there VMs not in the DB?
- for vm_not_found_in_db in vms_not_found_in_db:
- name = vm_not_found_in_db
+ if vm_power_state == db_power_state:
+ continue
- # We only care about instances that compute *should* know about
- if name.startswith("instance-"):
- # TODO(justinsb): What to do here? Adopt it? Shut it down?
- LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
- % locals())
+ self._instance_update(context,
+ db_instance["id"],
+ power_state=vm_power_state)
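
The replacement _sync_power_states above, restated as a self-contained function with plain dicts standing in for the driver and the database (NOSTATE/RUNNING codes match nova.compute.power_state; the row shapes are illustrative):

NOSTATE, RUNNING = 0x00, 0x01

def sync_power_states(db_instances, hypervisor_instances):
    """Return {instance_id: new_power_state} for rows that drifted."""
    updates = {}
    for inst in db_instances:
        # Hypervisor is authoritative; a VM it no longer knows about
        # maps to NOSTATE rather than being destroyed or stopped.
        vm_power_state = hypervisor_instances.get(inst['name'], NOSTATE)
        if vm_power_state != inst['power_state']:
            updates[inst['id']] = vm_power_state
    return updates

db_rows = [{'id': 1, 'name': 'instance-00000001', 'power_state': RUNNING},
           {'id': 2, 'name': 'instance-00000002', 'power_state': RUNNING}]
hyp = {'instance-00000001': RUNNING}  # instance 2 vanished from the host
assert sync_power_states(db_rows, hyp) == {2: NOSTATE}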
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
new file mode 100644
index 000000000..e3315a542
--- /dev/null
+++ b/nova/compute/task_states.py
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible task states for instances.
+
+Compute instance task states represent what is happening to the instance at the
+current moment. These tasks can be generic, such as 'spawning', or specific,
+such as 'block_device_mapping'. These task states allow for a better view into
+what an instance is doing and should be displayed to users/administrators as
+necessary.
+
+"""
+
+SCHEDULING = 'scheduling'
+BLOCK_DEVICE_MAPPING = 'block_device_mapping'
+NETWORKING = 'networking'
+SPAWNING = 'spawning'
+
+IMAGE_SNAPSHOT = 'image_snapshot'
+IMAGE_BACKUP = 'image_backup'
+
+UPDATING_PASSWORD = 'updating_password'
+
+RESIZE_PREP = 'resize_prep'
+RESIZE_MIGRATING = 'resize_migrating'
+RESIZE_MIGRATED = 'resize_migrated'
+RESIZE_FINISH = 'resize_finish'
+RESIZE_REVERTING = 'resize_reverting'
+RESIZE_CONFIRMING = 'resize_confirming'
+RESIZE_VERIFY = 'resize_verify'
+
+REBUILDING = 'rebuilding'
+
+REBOOTING = 'rebooting'
+PAUSING = 'pausing'
+UNPAUSING = 'unpausing'
+SUSPENDING = 'suspending'
+RESUMING = 'resuming'
+
+RESCUING = 'rescuing'
+UNRESCUING = 'unrescuing'
+
+DELETING = 'deleting'
+STOPPING = 'stopping'
+STARTING = 'starting'
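
To make the docstring concrete, here are the (vm_state, task_state) checkpoints a successful boot writes to the database, in the order the compute API and manager set them earlier in this diff:

# (vm_state, task_state) trail of a normal spawn.
BOOT_TRAIL = [
    ('building', 'scheduling'),            # compute API, pre-scheduler
    ('building', 'networking'),            # manager: network allocation
    ('building', 'block_device_mapping'),  # manager: volume setup
    ('building', 'spawning'),              # manager: driver.spawn()
    ('active', None),                      # done; no task in flight
]

for vm_state, task_state in BOOT_TRAIL:
    print('%-8s / %s' % (vm_state, task_state))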
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
new file mode 100644
index 000000000..6f16c1f09
--- /dev/null
+++ b/nova/compute/vm_states.py
@@ -0,0 +1,39 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible vm states for instances.
+
+Compute instance vm states represent the state of an instance as it pertains to
+a user or administrator. When combined with task states (task_states.py), a
+better picture can be formed regarding the instance's health.
+
+"""
+
+ACTIVE = 'active'
+BUILDING = 'building'
+REBUILDING = 'rebuilding'
+
+PAUSED = 'paused'
+SUSPENDED = 'suspended'
+RESCUED = 'rescued'
+DELETED = 'deleted'
+STOPPED = 'stopped'
+
+MIGRATING = 'migrating'
+RESIZING = 'resizing'
+
+ERROR = 'error'
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 65b09a65d..631e53ceb 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -28,6 +28,7 @@ from nova import flags
from nova import ipv6
from nova import utils
from nova import log as logging
+from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
@@ -1102,12 +1103,11 @@ def instance_destroy(context, instance_id):
def instance_stop(context, instance_id):
session = get_session()
with session.begin():
- from nova.compute import power_state
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'host': None,
- 'state': power_state.SHUTOFF,
- 'state_description': 'stopped',
+ 'vm_state': vm_states.STOPPED,
+ 'task_state': None,
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
@@ -1266,7 +1266,7 @@ def instance_get_all_by_filters(context, filters):
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
- 'state', 'instance_type_id', 'deleted']
+ 'vm_state', 'instance_type_id', 'deleted']
query_filters = [key for key in filters.iterkeys()
if key in exact_match_filter_names]
@@ -1484,18 +1484,6 @@ def instance_get_floating_address(context, instance_id):
return fixed_ip_refs[0].floating_ips[0]['address']
-@require_admin_context
-def instance_set_state(context, instance_id, state, description=None):
- # TODO(devcamcar): Move this out of models and into driver
- from nova.compute import power_state
- if not description:
- description = power_state.name(state)
- db.instance_update(context,
- instance_id,
- {'state': state,
- 'state_description': description})
-
-
@require_context
def instance_update(context, instance_id, values):
session = get_session()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
new file mode 100644
index 000000000..e58ae5362
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
@@ -0,0 +1,138 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+from sqlalchemy import MetaData, Table, Column, String
+
+from nova.compute import task_states
+from nova.compute import vm_states
+
+
+meta = MetaData()
+
+
+c_task_state = Column('task_state',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+_upgrade_translations = {
+ "stopping": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": task_states.STOPPING,
+ },
+ "stopped": {
+ "state_description": vm_states.STOPPED,
+ "task_state": None,
+ },
+ "terminated": {
+ "state_description": vm_states.DELETED,
+ "task_state": None,
+ },
+ "terminating": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": task_states.DELETING,
+ },
+ "running": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": None,
+ },
+ "scheduling": {
+ "state_description": vm_states.BUILDING,
+ "task_state": task_states.SCHEDULING,
+ },
+ "migrating": {
+ "state_description": vm_states.MIGRATING,
+ "task_state": None,
+ },
+ "pending": {
+ "state_description": vm_states.BUILDING,
+ "task_state": task_states.SCHEDULING,
+ },
+}
+
+
+_downgrade_translations = {
+ vm_states.ACTIVE: {
+ None: "running",
+ task_states.DELETING: "terminating",
+ task_states.STOPPING: "stopping",
+ },
+ vm_states.BUILDING: {
+ None: "pending",
+ task_states.SCHEDULING: "scheduling",
+ },
+ vm_states.STOPPED: {
+ None: "stopped",
+ },
+ vm_states.REBUILDING: {
+ None: "pending",
+ },
+ vm_states.DELETED: {
+ None: "terminated",
+ },
+ vm_states.MIGRATING: {
+ None: "migrating",
+ },
+}
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instance_table = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ c_state = instance_table.c.state
+ c_state.alter(name='power_state')
+
+ c_vm_state = instance_table.c.state_description
+ c_vm_state.alter(name='vm_state')
+
+ instance_table.create_column(c_task_state)
+
+ for old_state, values in _upgrade_translations.iteritems():
+ instance_table.update().\
+ values(**values).\
+ where(c_vm_state == old_state).\
+ execute()
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instance_table = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ c_task_state = instance_table.c.task_state
+
+ c_state = instance_table.c.power_state
+ c_state.alter(name='state')
+
+ c_vm_state = instance_table.c.vm_state
+ c_vm_state.alter(name='state_description')
+
+ for old_vm_state, old_task_states in _downgrade_translations.iteritems():
+ for old_task_state, new_state_desc in old_task_states.iteritems():
+ instance_table.update().\
+ where(c_task_state == old_task_state).\
+ where(c_vm_state == old_vm_state).\
+ values(vm_state=new_state_desc).\
+ execute()
+
+ instance_table.drop_column('task_state')
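
A pure-Python sketch of what upgrade() does to a legacy row once the columns are renamed. The translation table is trimmed, a dict update stands in for the SQL UPDATE, and the keys use the post-rename column names (the migration's own dicts still say "state_description"):

_upgrade_translations = {
    'running':  {'vm_state': 'active',   'task_state': None},
    'stopping': {'vm_state': 'active',   'task_state': 'stopping'},
    'pending':  {'vm_state': 'building', 'task_state': 'scheduling'},
}

def upgrade_row(row):
    # Column renames: state -> power_state, state_description -> vm_state.
    legacy = row.pop('state_description')
    row['power_state'] = row.pop('state')
    row.update(_upgrade_translations.get(
        legacy, {'vm_state': legacy, 'task_state': None}))
    return row

assert upgrade_row({'state': 1, 'state_description': 'running'}) == \
    {'power_state': 1, 'vm_state': 'active', 'task_state': None}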
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index f8feb0b4f..854034f12 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -193,8 +193,9 @@ class Instance(BASE, NovaBase):
key_name = Column(String(255))
key_data = Column(Text)
- state = Column(Integer)
- state_description = Column(String(255))
+ power_state = Column(Integer)
+ vm_state = Column(String(255))
+ task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
@@ -238,17 +239,6 @@ class Instance(BASE, NovaBase):
access_ip_v4 = Column(String(255))
access_ip_v6 = Column(String(255))
- # TODO(vish): see Ewan's email about state improvements, probably
- # should be in a driver base class or some such
- # vmstate_state = running, halted, suspended, paused
- # power_state = what we have
- # task_state = transitory and may trigger power state transition
-
- #@validates('state')
- #def validate_state(self, key, state):
- # assert(state in ['nostate', 'running', 'blocked', 'paused',
- # 'shutdown', 'shutoff', 'crashed'])
-
class VirtualStorageArray(BASE, NovaBase):
"""
diff --git a/nova/exception.py b/nova/exception.py
index caa65146d..fca4586c3 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -61,7 +61,7 @@ class ApiError(Error):
super(ApiError, self).__init__(outstr)
-class BuildInProgress(Error):
+class RebuildRequiresActiveInstance(Error):
pass
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index f28353f05..22f4e14f9 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -30,6 +30,7 @@ from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import power_state
+from nova.compute import vm_states
from nova.api.ec2 import ec2utils
@@ -104,10 +105,8 @@ class Scheduler(object):
dest, block_migration)
# Changing instance_state.
- db.instance_set_state(context,
- instance_id,
- power_state.PAUSED,
- 'migrating')
+ values = {"vm_state": vm_states.MIGRATING}
+ db.instance_update(context, instance_id, values)
# Changing volume state
for volume_ref in instance_ref['volumes']:
@@ -129,8 +128,7 @@ class Scheduler(object):
"""
# Checking instance is running.
- if (power_state.RUNNING != instance_ref['state'] or \
- 'running' != instance_ref['state_description']):
+ if instance_ref['power_state'] != power_state.RUNNING:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
raise exception.InstanceNotRunning(instance_id=instance_id)
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 3dfdeb79c..b9ef41465 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -10,8 +10,8 @@ from nova import utils
from nova import exception
from nova import flags
from nova.api.openstack import create_instance_helper
 from nova.compute import instance_types
-from nova.compute import power_state
+from nova.compute import vm_states
import nova.db.api
from nova import test
from nova.tests.api.openstack import common
@@ -35,17 +35,19 @@ def return_server_with_attributes(**kwargs):
return _return_server
-def return_server_with_power_state(power_state):
- return return_server_with_attributes(power_state=power_state)
+def return_server_with_state(vm_state, task_state=None):
+ return return_server_with_attributes(vm_state=vm_state,
+ task_state=task_state)
-def return_server_with_uuid_and_power_state(power_state):
- return return_server_with_power_state(power_state)
-
+def return_server_with_uuid_and_state(vm_state, task_state=None):
+ def _return_server(context, id):
+ return return_server_with_state(vm_state, task_state)
+ return _return_server
-def stub_instance(id, power_state=0, metadata=None,
- image_ref="10", flavor_id="1", name=None):
+def stub_instance(id, metadata=None, image_ref="10", flavor_id="1",
+ name=None, vm_state=None, task_state=None):
if metadata is not None:
metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()]
else:
@@ -66,8 +68,8 @@ def stub_instance(id, power_state=0, metadata=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": power_state,
- "state_description": "",
+ "vm_state": vm_state or vm_states.ACTIVE,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -175,11 +177,11 @@ class ServerActionsTest(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
@@ -242,19 +244,6 @@ class ServerActionsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 500)
- def test_resized_server_has_correct_status(self):
- req = self.webreq('/1', 'GET')
-
- def fake_migration_get(*args):
- return {}
-
- self.stubs.Set(nova.db, 'migration_get_by_instance_and_status',
- fake_migration_get)
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- body = json.loads(res.body)
- self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM')
-
def test_confirm_resize_server(self):
req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
@@ -642,11 +631,11 @@ class ServerActionsTestV11(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 3559e6de5..f75263c45 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -37,7 +37,8 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.compute.api
from nova.compute import instance_types
-from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
import nova.db.api
import nova.scheduler.api
from nova.db.sqlalchemy.models import Instance
@@ -91,15 +92,18 @@ def return_server_with_addresses(private, public):
return _return_server
-def return_server_with_power_state(power_state):
+def return_server_with_state(vm_state, task_state=None):
def _return_server(context, id):
- return stub_instance(id, power_state=power_state)
+ return stub_instance(id, vm_state=vm_state, task_state=task_state)
return _return_server
-def return_server_with_uuid_and_power_state(power_state):
+def return_server_with_uuid_and_state(vm_state, task_state):
def _return_server(context, id):
- return stub_instance(id, uuid=FAKE_UUID, power_state=power_state)
+ return stub_instance(id,
+ uuid=FAKE_UUID,
+ vm_state=vm_state,
+ task_state=task_state)
return _return_server
@@ -148,7 +152,8 @@ def instance_addresses(context, instance_id):
def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
- public_addresses=None, host=None, power_state=0,
+ public_addresses=None, host=None,
+ vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", interfaces=None, name=None,
access_ipv4=None, access_ipv6=None):
@@ -184,8 +189,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": power_state,
- "state_description": "",
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -494,7 +499,7 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -587,8 +592,8 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1, image_ref=image_ref,
- flavor_id=flavor_id)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE,
+ image_ref=image_ref, flavor_id=flavor_id)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -1209,9 +1214,8 @@ class ServersTest(test.TestCase):
def test_get_servers_allows_status_v1_1(self):
def fake_get_all(compute_self, context, search_opts=None):
self.assertNotEqual(search_opts, None)
- self.assertTrue('state' in search_opts)
- self.assertEqual(set(search_opts['state']),
- set([power_state.RUNNING, power_state.BLOCKED]))
+ self.assertTrue('vm_state' in search_opts)
+ self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE)
return [stub_instance(100)]
self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
@@ -1228,13 +1232,9 @@ class ServersTest(test.TestCase):
def test_get_servers_invalid_status_v1_1(self):
"""Test getting servers by invalid status"""
-
self.flags(allow_admin_api=False)
-
req = webob.Request.blank('/v1.1/fake/servers?status=running')
res = req.get_response(fakes.wsgi_app())
- # The following assert will fail if either of the asserts in
- # fake_get_all() fail
self.assertEqual(res.status_int, 400)
self.assertTrue(res.body.find('Invalid server status') > -1)
@@ -1738,6 +1738,7 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(16, len(server['adminPass']))
self.assertEqual(1, server['id'])
+ self.assertEqual("BUILD", server["status"])
self.assertEqual(0, server['progress'])
self.assertEqual('server_test', server['name'])
self.assertEqual(expected_flavor, server['flavor'])
@@ -2467,23 +2468,51 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 204)
self.assertEqual(self.server_delete_called, True)
- def test_shutdown_status(self):
- new_server = return_server_with_power_state(power_state.SHUTDOWN)
- self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTDOWN')
- def test_shutoff_status(self):
- new_server = return_server_with_power_state(power_state.SHUTOFF)
+class TestServerStatus(test.TestCase):
+
+ def _get_with_state(self, vm_state, task_state=None):
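+        """Stub the instance with the given states and GET /v1.0/servers/1."""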
+ new_server = return_server_with_state(vm_state, task_state)
self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+ request = webob.Request.blank('/v1.0/servers/1')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(response.status_int, 200)
+ return json.loads(response.body)
+
+ def test_active(self):
+ response = self._get_with_state(vm_states.ACTIVE)
+ self.assertEqual(response['server']['status'], 'ACTIVE')
+
+ def test_reboot(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING)
+ self.assertEqual(response['server']['status'], 'REBOOT')
+
+ def test_rebuild(self):
+ response = self._get_with_state(vm_states.REBUILDING)
+ self.assertEqual(response['server']['status'], 'REBUILD')
+
+ def test_rebuild_error(self):
+ response = self._get_with_state(vm_states.ERROR)
+ self.assertEqual(response['server']['status'], 'ERROR')
+
+ def test_resize(self):
+ response = self._get_with_state(vm_states.RESIZING)
+ self.assertEqual(response['server']['status'], 'RESIZE')
+
+ def test_verify_resize(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.RESIZE_VERIFY)
+ self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+ def test_password_update(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.UPDATING_PASSWORD)
+ self.assertEqual(response['server']['status'], 'PASSWORD')
+
+ def test_stopped(self):
+ response = self._get_with_state(vm_states.STOPPED)
+ self.assertEqual(response['server']['status'], 'STOPPED')
class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):
@@ -3536,8 +3565,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": 0,
- "state_description": "",
+ "vm_state": vm_states.BUILDING,
+ "task_state": None,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -3682,7 +3711,7 @@ class ServersViewBuilderV11Test(test.TestCase):
def test_build_server_detail_active_status(self):
-        #set the power state of the instance to running
-        self.instance['state'] = 1
+        # set the vm state of the instance to active
+        self.instance['vm_state'] = vm_states.ACTIVE
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
expected_server = {
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index b9382038a..2cf604d06 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -28,6 +28,17 @@ LOG = logging.getLogger('nova.tests.integrated')
class ServersTest(integrated_helpers._IntegratedTestBase):
+ def _wait_for_creation(self, server):
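+        """Poll the server until it leaves BUILD status, with a retry cap."""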
+ retries = 0
+ while server['status'] == 'BUILD':
+ time.sleep(1)
+ server = self.api.get_server(server['id'])
+            LOG.debug("found server: %s" % server)
+ retries = retries + 1
+ if retries > 5:
+ break
+ return server
+
def test_get_servers(self):
"""Simple check that listing servers works."""
servers = self.api.get_servers()
@@ -36,9 +47,9 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_delete_server(self):
"""Creates and deletes a server."""
+ self.flags(stub_network=True)
# Create server
-
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
@@ -91,19 +102,11 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
server_ids = [server['id'] for server in servers]
self.assertTrue(created_server_id in server_ids)
- # Wait (briefly) for creation
- retries = 0
- while found_server['status'] == 'build':
- LOG.debug("found server: %s" % found_server)
- time.sleep(1)
- found_server = self.api.get_server(created_server_id)
- retries = retries + 1
- if retries > 5:
- break
+ found_server = self._wait_for_creation(found_server)
-        # It should be available...
-        # TODO(justinsb): Mock doesn't yet do this...
-        #self.assertEqual('available', found_server['status'])
+        # It should be active...
+        self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertTrue("image" in server)
@@ -181,6 +184,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server(self):
"""Rebuild a server."""
+ self.flags(stub_network=True)
         # create a server which initially has no metadata
server = self._build_minimal_create_server_request()
@@ -190,6 +194,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
@@ -212,6 +218,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server_with_metadata(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
         # create a server which initially has no metadata
server = self._build_minimal_create_server_request()
@@ -221,6 +228,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
@@ -248,6 +257,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server_with_metadata_removal(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
         # create a server which initially has no metadata
server = self._build_minimal_create_server_request()
@@ -264,6 +274,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 158df2a27..a52dd041a 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -40,6 +40,7 @@ from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler import multi
from nova.compute import power_state
+from nova.compute import vm_states
FLAGS = flags.FLAGS
@@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase):
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 10)
inst['local_gb'] = kwargs.get('local_gb', 20)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
+ inst['task_state'] = kwargs.get('task_state', None)
return db.instance_create(ctxt, inst)
def test_fallback(self):
@@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase):
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['local_gb'] = kwargs.get('local_gb', 30)
         inst['launched_on'] = kwargs.get('launched_on', 'dummy')
- inst['state_description'] = kwargs.get('state_description', 'running')
- inst['state'] = kwargs.get('state', power_state.RUNNING)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['task_state'] = kwargs.get('task_state', None)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase):
block_migration=False)
i_ref = db.instance_get(self.context, instance_id)
- self.assertTrue(i_ref['state_description'] == 'migrating')
+        self.assertEqual(i_ref['vm_state'], vm_states.MIGRATING)
db.instance_destroy(self.context, instance_id)
db.volume_destroy(self.context, v_ref['id'])
def test_live_migration_src_check_instance_not_running(self):
"""The instance given by instance_id is not running."""
- instance_id = self._create_instance(state_description='migrating')
+ instance_id = self._create_instance(power_state=power_state.NOSTATE)
i_ref = db.instance_get(self.context, instance_id)
try:
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 14ab64f33..d533a4794 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -38,6 +38,7 @@ from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
+from nova.compute import vm_states
from nova.image import fake
@@ -1161,7 +1162,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
def _wait_for_state(self, ctxt, instance_id, predicate):
- """Wait for an stopping instance to be a given state"""
+        """Wait for an instance to reach a given state."""
id = ec2utils.ec2_id_to_id(instance_id)
while True:
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
@@ -1172,12 +1173,16 @@ class CloudTestCase(test.TestCase):
def _wait_for_running(self, instance_id):
def is_running(info):
- return info['state_description'] == 'running'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
+            return vm_state == vm_states.ACTIVE and task_state is None
self._wait_for_state(self.context, instance_id, is_running)
def _wait_for_stopped(self, instance_id):
def is_stopped(info):
- return info['state_description'] == 'stopped'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
+            return vm_state == vm_states.STOPPED and task_state is None
self._wait_for_state(self.context, instance_id, is_stopped)
def _wait_for_terminate(self, instance_id):
@@ -1560,7 +1565,7 @@ class CloudTestCase(test.TestCase):
'id': 0,
'root_device_name': '/dev/sdh',
'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
- 'state_description': 'stopping',
+ 'vm_state': vm_states.STOPPED,
'instance_type': {'name': 'fake_type'},
'kernel_id': 1,
'ramdisk_id': 2,
@@ -1604,7 +1609,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
- 'instanceInitiatedShutdownBehavior': 'stop'})
+ 'instanceInitiatedShutdownBehavior': 'stopped'})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 6659b81eb..766a7da9b 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -24,6 +24,7 @@ from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
+from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
@@ -763,8 +764,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})
@@ -795,8 +796,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
self.compute.db = dbmock
@@ -841,8 +842,8 @@ class ComputeTestCase(test.TestCase):
c = context.get_admin_context()
instance_id = self._create_instance()
i_ref = db.instance_get(c, instance_id)
- db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
- 'state': power_state.PAUSED})
+ db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
'instance_id': instance_id})
@@ -903,7 +904,7 @@ class ComputeTestCase(test.TestCase):
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
+ self.assertEqual(power_state.NOSTATE, instances[0]['power_state'])
def test_get_all_by_name_regexp(self):
"""Test searching instances by name (display_name)"""
@@ -1323,25 +1324,28 @@ class ComputeTestCase(test.TestCase):
"""Test searching instances by state"""
c = context.get_admin_context()
- instance_id1 = self._create_instance({'state': power_state.SHUTDOWN})
+ instance_id1 = self._create_instance({
+ 'power_state': power_state.SHUTDOWN,
+ })
instance_id2 = self._create_instance({
- 'id': 2,
- 'state': power_state.RUNNING})
+ 'id': 2,
+ 'power_state': power_state.RUNNING,
+ })
instance_id3 = self._create_instance({
- 'id': 10,
- 'state': power_state.RUNNING})
-
+ 'id': 10,
+ 'power_state': power_state.RUNNING,
+ })
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SUSPENDED})
+ search_opts={'power_state': power_state.SUSPENDED})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SHUTDOWN})
+ search_opts={'power_state': power_state.SHUTDOWN})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0].id, instance_id1)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.RUNNING})
+ search_opts={'power_state': power_state.RUNNING})
self.assertEqual(len(instances), 2)
instance_ids = [instance.id for instance in instances]
self.assertTrue(instance_id2 in instance_ids)
@@ -1349,7 +1353,7 @@ class ComputeTestCase(test.TestCase):
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
- search_opts={'state': [power_state.SHUTDOWN,
+ search_opts={'power_state': [power_state.SHUTDOWN,
power_state.RUNNING]})
self.assertEqual(len(instances), 3)
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index afd672c7a..0d896239a 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -23,6 +23,8 @@ import time
from nova import db
from nova import utils
+from nova.compute import task_states
+from nova.compute import vm_states
def stub_out_db_instance_api(stubs):
@@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs):
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
- 'state_description': 'scheduling',
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),