path: root/nova/compute
author     Dan Prince <dan.prince@rackspace.com>  2011-03-24 12:09:43 -0400
committer  Dan Prince <dan.prince@rackspace.com>  2011-03-24 12:09:43 -0400
commit     e30c4157323a58318fbdccbe335fa905154e169d (patch)
tree       5bfba39061b30d9ecbdaccaffb57d1d5f4cdd59e /nova/compute
parent     f4dee61638db068c03edd7fe0ab3488ac4670d89 (diff)
parent     c53e56576ead57815f3542a7faa797e5d0135e0c (diff)
Merge w/ trunk.
Diffstat (limited to 'nova/compute')
-rw-r--r--  nova/compute/api.py           42
-rw-r--r--  nova/compute/manager.py      156
-rw-r--r--  nova/compute/power_state.py   18
3 files changed, 183 insertions(+), 33 deletions(-)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index e70817212..1fbaa399d 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -253,6 +253,16 @@ class API(base.Base):
return [dict(x.iteritems()) for x in instances]
+ def has_finished_migration(self, context, instance_id):
+ """Retrieves whether or not a finished migration exists for
+ an instance"""
+ try:
+ db.migration_get_by_instance_and_status(context, instance_id,
+ 'finished')
+ return True
+ except exception.NotFound:
+ return False
+
def ensure_default_security_group(self, context):
""" Create security group for the security context if it
does not already exist
@@ -464,6 +474,8 @@ class API(base.Base):
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('revert_resize', context, instance_id,
migration_ref['dest_compute'], params=params)
+ self.db.migration_update(context, migration_ref['id'],
+ {'status': 'reverted'})
def confirm_resize(self, context, instance_id):
"""Confirms a migration/resize, deleting the 'old' instance in the
@@ -479,17 +491,41 @@ class API(base.Base):
self._cast_compute_message('confirm_resize', context, instance_id,
migration_ref['source_compute'], params=params)
- self.db.migration_update(context, migration_id,
+ self.db.migration_update(context, migration_ref['id'],
{'status': 'confirmed'})
self.db.instance_update(context, instance_id,
{'host': migration_ref['dest_compute'], })
- def resize(self, context, instance_id, flavor):
+ def resize(self, context, instance_id, flavor_id):
"""Resize a running instance."""
+ instance = self.db.instance_get(context, instance_id)
+ current_instance_type = self.db.instance_type_get_by_name(
+ context, instance['instance_type'])
+
+ new_instance_type = self.db.instance_type_get_by_flavor_id(
+ context, flavor_id)
+ current_instance_type_name = current_instance_type['name']
+ new_instance_type_name = new_instance_type['name']
+ LOG.debug(_("Old instance type %(current_instance_type_name)s, "
+ " new instance type %(new_instance_type_name)s") % locals())
+ if not new_instance_type:
+ raise exception.ApiError(_("Requested flavor %(flavor_id)d "
+ "does not exist") % locals())
+
+ current_memory_mb = current_instance_type['memory_mb']
+ new_memory_mb = new_instance_type['memory_mb']
+ if current_memory_mb > new_memory_mb:
+ raise exception.ApiError(_("Invalid flavor: cannot downsize "
+ "instances"))
+ if current_memory_mb == new_memory_mb:
+ raise exception.ApiError(_("Invalid flavor: cannot use "
+ "the same flavor."))
+
self._cast_scheduler_message(context,
{"method": "prep_resize",
"args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_id, }},)
+ "instance_id": instance_id,
+ "flavor_id": flavor_id}})
def pause(self, context, instance_id):
"""Pause the given instance."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 576937cd8..7316d1510 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -41,9 +42,10 @@ import string
import socket
import sys
import tempfile
-import time
import functools
+from eventlet import greenthread
+
from nova import exception
from nova import flags
from nova import log as logging
@@ -51,6 +53,7 @@ from nova import manager
from nova import rpc
from nova import utils
from nova.compute import power_state
+from nova.virt import driver
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
@@ -65,8 +68,11 @@ flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on'
'this host.')
flags.DEFINE_integer('live_migration_retry_count', 30,
- ("Retry count needed in live_migration."
- " sleep 1 sec for each count"))
+ "Retry count needed in live_migration."
+ " sleep 1 sec for each count")
+flags.DEFINE_integer("rescue_timeout", 0,
+ "Automatically unrescue an instance after N seconds."
+ " Set to 0 to disable.")
LOG = logging.getLogger('nova.compute.manager')
@@ -117,9 +123,11 @@ class ComputeManager(manager.Manager):
compute_driver = FLAGS.compute_driver
try:
- self.driver = utils.import_object(compute_driver)
- except ImportError:
- LOG.error("Unable to load the virtualization driver.")
+ self.driver = utils.check_isinstance(
+ utils.import_object(compute_driver),
+ driver.ComputeDriver)
+ except ImportError as e:
+ LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
self.network_manager = utils.import_object(FLAGS.network_manager)
@@ -132,6 +140,12 @@ class ComputeManager(manager.Manager):
"""
self.driver.init_host(host=self.host)
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+ super(ComputeManager, self).periodic_tasks(context)
+ if FLAGS.rescue_timeout > 0:
+ self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+
def _update_state(self, context, instance_id):
"""Update the state of an instance from the driver info."""
# FIXME(ja): include other fields from state?
@@ -437,25 +451,41 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
migration_ref = self.db.migration_get(context, migration_id)
- #TODO(mdietz): we may want to split these into separate methods.
- if migration_ref['source_compute'] == FLAGS.host:
- self.driver._start(instance_ref)
- self.db.migration_update(context, migration_id,
- {'status': 'reverted'})
- else:
- self.driver.destroy(instance_ref)
- topic = self.db.queue_get_for(context, FLAGS.compute_topic,
- instance_ref['host'])
- rpc.cast(context, topic,
- {'method': 'revert_resize',
- 'args': {
- 'migration_id': migration_ref['id'],
- 'instance_id': instance_id, },
- })
+ self.driver.destroy(instance_ref)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ rpc.cast(context, topic,
+ {'method': 'finish_revert_resize',
+ 'args': {
+ 'migration_id': migration_ref['id'],
+ 'instance_id': instance_id, },
+ })
@exception.wrap_exception
@checks_instance_lock
- def prep_resize(self, context, instance_id):
+ def finish_revert_resize(self, context, instance_id, migration_id):
+ """Finishes the second half of reverting a resize, powering back on
+ the source instance and reverting the resized attributes in the
+ database"""
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ migration_ref['old_flavor_id'])
+
+ # Just roll back the record. There's no need to resize down since
+ # the 'old' VM already has the preferred attributes
+ self.db.instance_update(context, instance_id,
+ dict(memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb']))
+
+ self.driver.revert_resize(instance_ref)
+ self.db.migration_update(context, migration_id,
+ {'status': 'reverted'})
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def prep_resize(self, context, instance_id, flavor_id):
"""Initiates the process of moving a running instance to another
host, possibly changing the RAM and disk size in the process"""
context = context.elevated()
@@ -464,12 +494,17 @@ class ComputeManager(manager.Manager):
raise exception.Error(_(
'Migration error: destination same as source!'))
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ flavor_id)
migration_ref = self.db.migration_create(context,
{'instance_id': instance_id,
'source_compute': instance_ref['host'],
'dest_compute': FLAGS.host,
'dest_host': self.driver.get_host_ip_addr(),
+ 'old_flavor_id': instance_type['flavorid'],
+ 'new_flavor_id': flavor_id,
'status': 'pre-migrating'})
+
LOG.audit(_('instance %s: migrating to '), instance_id,
context=context)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
@@ -495,8 +530,6 @@ class ComputeManager(manager.Manager):
self.db.migration_update(context, migration_id,
{'status': 'post-migrating', })
- #TODO(mdietz): This is where we would update the VM record
- #after resizing
service = self.db.service_get_by_host_and_topic(context,
migration_ref['dest_compute'], FLAGS.compute_topic)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
@@ -517,7 +550,19 @@ class ComputeManager(manager.Manager):
migration_ref = self.db.migration_get(context, migration_id)
instance_ref = self.db.instance_get(context,
migration_ref['instance_id'])
-
+ # TODO(mdietz): apply the rest of the instance_type attributes
+ # once they're supported
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ migration_ref['new_flavor_id'])
+ self.db.instance_update(context, instance_id,
+ dict(instance_type=instance_type['name'],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb']))
+
+ # reload the updated instance ref
+ # FIXME(mdietz): is there reload functionality?
+ instance_ref = self.db.instance_get(context, instance_id)
self.driver.finish_resize(instance_ref, disk_info)
self.db.migration_update(context, migration_id,
@@ -800,7 +845,7 @@ class ComputeManager(manager.Manager):
return self.driver.update_available_resource(context, self.host)
- def pre_live_migration(self, context, instance_id):
+ def pre_live_migration(self, context, instance_id, time=None):
"""Preparations for live migration at dest host.
:param context: security context
@@ -808,6 +853,9 @@ class ComputeManager(manager.Manager):
"""
+ if not time:
+ time = greenthread
+
# Getting instance info
instance_ref = self.db.instance_get(context, instance_id)
ec2_id = instance_ref['hostname']
@@ -976,3 +1024,59 @@ class ComputeManager(manager.Manager):
for volume in instance_ref['volumes']:
self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'})
+
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+ error_list = super(ComputeManager, self).periodic_tasks(context)
+ if error_list is None:
+ error_list = []
+
+ try:
+ self._poll_instance_states(context)
+ except Exception as ex:
+ LOG.warning(_("Error during instance poll: %s"),
+ unicode(ex))
+ error_list.append(ex)
+ return error_list
+
+ def _poll_instance_states(self, context):
+ vm_instances = self.driver.list_instances_detail()
+ vm_instances = dict((vm.name, vm) for vm in vm_instances)
+
+ # Keep a list of VMs not in the DB, cross them off as we find them
+ vms_not_found_in_db = list(vm_instances.keys())
+
+ db_instances = self.db.instance_get_all_by_host(context, self.host)
+
+ for db_instance in db_instances:
+ name = db_instance['name']
+ vm_instance = vm_instances.get(name)
+ if vm_instance is None:
+ LOG.info(_("Found instance '%(name)s' in DB but no VM. "
+ "Setting state to shutoff.") % locals())
+ vm_state = power_state.SHUTOFF
+ else:
+ vm_state = vm_instance.state
+ vms_not_found_in_db.remove(name)
+
+ db_state = db_instance['state']
+ if vm_state != db_state:
+ LOG.info(_("DB/VM state mismatch. Changing state from "
+ "'%(db_state)s' to '%(vm_state)s'") % locals())
+ self.db.instance_set_state(context,
+ db_instance['id'],
+ vm_state)
+
+ if vm_state == power_state.SHUTOFF:
+ # TODO(soren): This is what the compute manager does when you
+ # terminate an instance. At some point I figure we'll have a
+ # "terminated" state and some sort of cleanup job that runs
+ # occasionally, cleaning them out.
+ self.db.instance_destroy(context, db_instance['id'])
+
+ # Are there VMs not in the DB?
+ for vm_not_found_in_db in vms_not_found_in_db:
+ name = vm_not_found_in_db
+ # TODO(justinsb): What to do here? Adopt it? Shut it down?
+ LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
+ % locals())
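
As a reading aid, a condensed, driver-agnostic restatement of the reconciliation loop that _poll_instance_states() introduces above; the real method uses self.driver and self.db exactly as shown in the diff, so treat this as an illustrative sketch only:

    # Sketch: reconcile hypervisor-reported VMs with the instances table.
    vms = dict((vm.name, vm) for vm in driver.list_instances_detail())
    for db_instance in db.instance_get_all_by_host(context, host):
        vm = vms.pop(db_instance['name'], None)
        # A VM missing on the hypervisor is treated as shut off.
        vm_state = vm.state if vm else power_state.SHUTOFF
        if vm_state != db_instance['state']:
            db.instance_set_state(context, db_instance['id'], vm_state)
        if vm_state == power_state.SHUTOFF:
            db.instance_destroy(context, db_instance['id'])
    # Anything left in vms exists on the hypervisor but not in the DB;
    # the new code only logs a warning for those.
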
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index adfc2dff0..ef013b2ef 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
@@ -19,6 +20,7 @@
"""The various power states that a VM can be in."""
+#NOTE(justinsb): These are the virDomainState values from libvirt
NOSTATE = 0x00
RUNNING = 0x01
BLOCKED = 0x02
@@ -29,9 +31,10 @@ CRASHED = 0x06
SUSPENDED = 0x07
FAILED = 0x08
-
-def name(code):
- d = {
+# TODO(justinsb): Power state really needs to be a proper class,
+# so that we're not locked into the libvirt status codes and can put mapping
+# logic here rather than spread throughout the code
+_STATE_MAP = {
NOSTATE: 'pending',
RUNNING: 'running',
BLOCKED: 'blocked',
@@ -41,4 +44,11 @@ def name(code):
CRASHED: 'crashed',
SUSPENDED: 'suspended',
FAILED: 'failed to spawn'}
- return d[code]
+
+
+def name(code):
+ return _STATE_MAP[code]
+
+
+def valid_states():
+ return _STATE_MAP.keys()
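
The refactor keeps name() behaviour-compatible while also exposing the mapping's keys through valid_states(); a quick illustrative session, assuming the full state map of which only part is visible in this hunk:

    >>> from nova.compute import power_state
    >>> power_state.name(power_state.RUNNING)
    'running'
    >>> power_state.RUNNING in power_state.valid_states()
    True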