author     Johannes Erdfelt <johannes.erdfelt@rackspace.com>  2011-09-21 15:54:30 +0000
committer  Tarmac <>  2011-09-21 15:54:30 +0000
commit     7e3bebbe8e911851a7398b8d5ad81afb421dfd62 (patch)
tree       f633b43d1f5fa1eabac24300ba951a322bec1fed /nova
parent     1fc5abe0c63c6395e77c8031ae0a0b49e251f470 (diff)
parent     ad3f3d0f845fddb2658c427085e426e45b88ab4b (diff)
Instance deletions in OpenStack are immediate, which can cause data to be lost accidentally.
This branch adds a new configuration flag, reclaim_instance_interval. The default of 0 preserves the pre-patch behavior: instances are deleted immediately. Any value greater than 0 causes the instance to be powered off immediately and reclaimed only after the interval has elapsed. Two new server actions, restore and forceDelete, allow a previously deleted instance to be restored or reclaimed immediately.
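
As an illustration of the new API surface (not part of this patch), a client restores or force-deletes a server by POSTing to the server's action resource. The URL layout, token handling, and helper below are assumptions for the sketch:

    # Python 2 sketch; httplib2 and the endpoint layout are assumptions.
    import json
    import httplib2

    def server_action(base_url, token, server_id, body):
        # POST /servers/<id>/action with a single-key action body.
        http = httplib2.Http()
        resp, _content = http.request(
            '%s/servers/%s/action' % (base_url, server_id), 'POST',
            headers={'X-Auth-Token': token,
                     'Content-Type': 'application/json'},
            body=json.dumps(body))
        return resp.status  # 202 Accepted on success

    # Restore a soft-deleted instance:
    #     server_action(url, token, server_id, {'restore': {}})
    # Reclaim it immediately instead of waiting for the interval:
    #     server_action(url, token, server_id, {'forceDelete': {}})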
Diffstat (limited to 'nova')
-rw-r--r--  nova/api/ec2/cloud.py                          |   1
-rw-r--r--  nova/api/openstack/common.py                   |   3
-rw-r--r--  nova/api/openstack/contrib/deferred_delete.py  |  76
-rw-r--r--  nova/api/openstack/servers.py                  |  10
-rw-r--r--  nova/compute/api.py                            |  85
-rw-r--r--  nova/compute/manager.py                        |  64
-rw-r--r--  nova/compute/task_states.py                    |   2
-rw-r--r--  nova/compute/vm_states.py                      |   1
-rw-r--r--  nova/tests/api/openstack/test_extensions.py    |   1
-rw-r--r--  nova/tests/integrated/integrated_helpers.py    |   8
-rw-r--r--  nova/tests/integrated/test_servers.py          | 146
-rw-r--r--  nova/virt/driver.py                            |   8
-rw-r--r--  nova/virt/xenapi/vmops.py                      |  10
-rw-r--r--  nova/virt/xenapi_conn.py                       |   8
14 files changed, 396 insertions(+), 27 deletions(-)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 23ac30494..68d39042f 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -89,6 +89,7 @@ _STATE_DESCRIPTION_MAP = {
vm_states.BUILDING: 'pending',
vm_states.REBUILDING: 'pending',
vm_states.DELETED: 'terminated',
+ vm_states.SOFT_DELETE: 'terminated',
vm_states.STOPPED: 'stopped',
vm_states.MIGRATING: 'migrate',
vm_states.RESIZING: 'resize',
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index ca7848678..3ef9bdee5 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -78,6 +78,9 @@ _STATE_MAP = {
vm_states.DELETED: {
'default': 'DELETED',
},
+ vm_states.SOFT_DELETE: {
+ 'default': 'DELETED',
+ },
}
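
For context, _STATE_MAP is keyed on vm_state, with optional per-task_state overrides and a 'default' fallback; with the hunk above, SOFT_DELETE instances report as DELETED through the API just as hard-deleted ones do. A minimal sketch of the lookup (the helper name is illustrative, not necessarily the one in common.py):

    def status_from_state(vm_state, task_state='default'):
        # Fall back to the 'default' entry when there is no
        # task_state-specific override for this vm_state.
        task_map = _STATE_MAP.get(vm_state, {'default': 'UNKNOWN'})
        return task_map.get(task_state, task_map['default'])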
diff --git a/nova/api/openstack/contrib/deferred_delete.py b/nova/api/openstack/contrib/deferred_delete.py
new file mode 100644
index 000000000..13ee5511e
--- /dev/null
+++ b/nova/api/openstack/contrib/deferred_delete.py
@@ -0,0 +1,76 @@
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The deferred instance delete extension."""
+
+import webob
+from webob import exc
+
+from nova import compute
+from nova import exception
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import servers
+
+
+LOG = logging.getLogger("nova.api.contrib.deferred-delete")
+
+
+class Deferred_delete(extensions.ExtensionDescriptor):
+ def __init__(self):
+ super(Deferred_delete, self).__init__()
+ self.compute_api = compute.API()
+
+ def _restore(self, input_dict, req, instance_id):
+ """Restore a previously deleted instance."""
+
+ context = req.environ["nova.context"]
+ self.compute_api.restore(context, instance_id)
+ return webob.Response(status_int=202)
+
+ def _force_delete(self, input_dict, req, instance_id):
+ """Force delete of instance before deferred cleanup."""
+
+ context = req.environ["nova.context"]
+ self.compute_api.force_delete(context, instance_id)
+ return webob.Response(status_int=202)
+
+ def get_name(self):
+ return "DeferredDelete"
+
+ def get_alias(self):
+ return "os-deferred-delete"
+
+ def get_description(self):
+ return "Instance deferred delete"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/deferred-delete/api/v1.1"
+
+ def get_updated(self):
+ return "2011-09-01T00:00:00+00:00"
+
+ def get_actions(self):
+ """Return the actions the extension adds, as required by contract."""
+ actions = [
+ extensions.ActionExtension("servers", "restore",
+ self._restore),
+ extensions.ActionExtension("servers", "forceDelete",
+ self._force_delete),
+ ]
+
+ return actions
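
For reference, ActionExtension registrations like the ones above are dispatched by the action key in the request body; a simplified sketch of what the framework does with a POST to /servers/<id>/action (the real logic lives in nova.api.openstack.extensions and differs in detail):

    # Simplified dispatch sketch, not the actual framework code.
    def dispatch_action(registered, input_dict, req, instance_id):
        for action, handler in registered.iteritems():
            if action in input_dict:
                return handler(input_dict, req, instance_id)
        raise KeyError('unsupported server action')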
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 856c3c613..0e7c37486 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -169,6 +169,12 @@ class Controller(object):
server['server']['adminPass'] = extra_values['password']
return server
+ def _delete(self, context, id):
+ if FLAGS.reclaim_instance_interval:
+ self.compute_api.soft_delete(context, id)
+ else:
+ self.compute_api.delete(context, id)
+
@scheduler_api.redirect_handler
def update(self, req, id, body):
"""Update server then pass on to version-specific controller"""
@@ -572,7 +578,7 @@ class ControllerV10(Controller):
def delete(self, req, id):
""" Destroys a server """
try:
- self.compute_api.delete(req.environ['nova.context'], id)
+ self._delete(req.environ['nova.context'], id)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@@ -650,7 +656,7 @@ class ControllerV11(Controller):
def delete(self, req, id):
""" Destroys a server """
try:
- self.compute_api.delete(req.environ['nova.context'], id)
+ self._delete(req.environ['nova.context'], id)
except exception.NotFound:
raise exc.HTTPNotFound()
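
Note that whether _delete() defers is controlled entirely by the reclaim_instance_interval flag (defined in nova/compute/manager.py below). A comment-only sketch of setting it; the flagfile path is an assumed deployment detail:

    # In a deployment flagfile (e.g. /etc/nova/nova.conf, assumed path):
    #     --reclaim_instance_interval=3600
    # In tests, via the helper used elsewhere in this patch:
    #     self.flags(reclaim_instance_interval=3600)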
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 76e1e7a60..1b35f061d 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,6 +92,19 @@ def _is_able_to_shutdown(instance, instance_id):
return True
+def _is_queued_delete(instance, instance_id):
+ vm_state = instance["vm_state"]
+ task_state = instance["task_state"]
+
+ if vm_state != vm_states.SOFT_DELETE:
+ LOG.warn(_("Instance %(instance_id)s is not in a 'soft delete' "
+ "state. It is currently %(vm_state)s. Action aborted.") %
+ locals())
+ return False
+
+ return True
+
+
class API(base.Base):
"""API for interacting with the compute manager."""
@@ -752,15 +765,85 @@ class API(base.Base):
{'instance_id': instance_id, 'action_str': action_str})
raise
+ @scheduler_api.reroute_compute("soft_delete")
+ def soft_delete(self, context, instance_id):
+ """Terminate an instance."""
+ LOG.debug(_("Going to try to soft delete %s"), instance_id)
+ instance = self._get_instance(context, instance_id, 'soft delete')
+
+ if not _is_able_to_shutdown(instance, instance_id):
+ return
+
+ # NOTE(jerdfelt): The compute daemon handles reclaiming instances
+ # that are in soft delete. If there is no host assigned, there is
+ # no daemon to reclaim, so delete it immediately.
+ host = instance['host']
+ if host:
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.SOFT_DELETE,
+ task_state=task_states.POWERING_OFF,
+ deleted_at=utils.utcnow())
+
+ self._cast_compute_message('power_off_instance', context,
+ instance_id, host)
+ else:
+ LOG.warning(_("No host for instance %s, deleting immediately"),
+ instance_id)
+ terminate_volumes(self.db, context, instance_id)
+ self.db.instance_destroy(context, instance_id)
+
@scheduler_api.reroute_compute("delete")
def delete(self, context, instance_id):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate %s"), instance_id)
- instance = self._get_instance(context, instance_id, 'terminating')
+ instance = self._get_instance(context, instance_id, 'delete')
if not _is_able_to_shutdown(instance, instance_id):
return
+ host = instance['host']
+ if host:
+ self.update(context,
+ instance_id,
+ task_state=task_states.DELETING)
+
+ self._cast_compute_message('terminate_instance', context,
+ instance_id, host)
+ else:
+ terminate_volumes(self.db, context, instance_id)
+ self.db.instance_destroy(context, instance_id)
+
+ @scheduler_api.reroute_compute("restore")
+ def restore(self, context, instance_id):
+ """Restore a previously deleted (but not reclaimed) instance."""
+ instance = self._get_instance(context, instance_id, 'restore')
+
+ if not _is_queued_delete(instance, instance_id):
+ return
+
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ deleted_at=None)
+
+ host = instance['host']
+ if host:
+ self.update(context,
+ instance_id,
+ task_state=task_states.POWERING_ON)
+ self._cast_compute_message('power_on_instance', context,
+ instance_id, host)
+
+ @scheduler_api.reroute_compute("force_delete")
+ def force_delete(self, context, instance_id):
+ """Force delete a previously deleted (but not reclaimed) instance."""
+ instance = self._get_instance(context, instance_id, 'force delete')
+
+ if not _is_queued_delete(instance, instance_id):
+ return
+
self.update(context,
instance_id,
task_state=task_states.DELETING)
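
Taken together, soft_delete, restore, and force_delete define a small lifecycle; a comment sketch of the transitions (state names from vm_states and task_states in this patch):

    # soft_delete:  ACTIVE -> SOFT_DELETE (task POWERING_OFF), deleted_at set
    # restore:      SOFT_DELETE -> ACTIVE (task POWERING_ON), deleted_at cleared
    # force_delete: SOFT_DELETE -> task DELETING -> instance destroyed
    # periodic reclaim (manager.py below): SOFT_DELETE older than
    #     reclaim_instance_interval -> instance destroyed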
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index cb5d10f83..d7c23c65d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -35,12 +35,13 @@ terminating it.
"""
+import datetime
+import functools
import os
import socket
import sys
import tempfile
import time
-import functools
from eventlet import greenthread
@@ -84,6 +85,8 @@ flags.DEFINE_integer("resize_confirm_window", 0,
" Set to 0 to disable.")
flags.DEFINE_integer('host_state_interval', 120,
'Interval in seconds for querying the host status')
+flags.DEFINE_integer('reclaim_instance_interval', 0,
+ 'Interval in seconds for reclaiming deleted instances')
LOG = logging.getLogger('nova.compute.manager')
@@ -175,7 +178,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'nova-compute restart.'), locals())
self.reboot_instance(context, instance['id'])
elif drv_state == power_state.RUNNING:
- # Hyper-V and VMWareAPI drivers will raise and exception
+ # Hyper-V and VMWareAPI drivers will raise an exception
try:
net_info = self._get_instance_nw_info(context, instance)
self.driver.ensure_filtering_rules_for_instance(instance,
@@ -487,10 +490,8 @@ class ComputeManager(manager.SchedulerDependentManager):
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @checks_instance_lock
- def terminate_instance(self, context, instance_id):
- """Terminate an instance on this host."""
+ def _delete_instance(self, context, instance_id):
+ """Delete an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance = self.db.instance_get(context.elevated(), instance_id)
self._instance_update(context,
@@ -508,6 +509,12 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
+ def terminate_instance(self, context, instance_id):
+ """Terminate an instance on this host."""
+ self._delete_instance(context, instance_id)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @checks_instance_lock
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
@@ -518,6 +525,30 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
+ def power_off_instance(self, context, instance_id):
+ """Power off an instance on this host."""
+ instance = self.db.instance_get(context, instance_id)
+ self.driver.power_off(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ task_state=None)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @checks_instance_lock
+ def power_on_instance(self, context, instance_id):
+ """Power on an instance on this host."""
+ instance = self.db.instance_get(context, instance_id)
+ self.driver.power_on(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ task_state=None)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
@@ -1676,6 +1707,13 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
error_list.append(ex)
+ try:
+ self._reclaim_queued_deletes(context)
+ except Exception as ex:
+ LOG.warning(_("Error during reclamation of queued deletes: %s"),
+ unicode(ex))
+ error_list.append(ex)
+
return error_list
def _report_driver_status(self):
@@ -1725,3 +1763,17 @@ class ComputeManager(manager.SchedulerDependentManager):
self._instance_update(context,
db_instance["id"],
power_state=vm_power_state)
+
+ def _reclaim_queued_deletes(self, context):
+ """Reclaim instances that are queued for deletion."""
+
+ instances = self.db.instance_get_all_by_host(context, self.host)
+
+ queue_time = datetime.timedelta(
+ seconds=FLAGS.reclaim_instance_interval)
+ curtime = utils.utcnow()
+ for instance in instances:
+ if instance['vm_state'] == vm_states.SOFT_DELETE and \
+ (curtime - instance['deleted_at']) >= queue_time:
+ LOG.info('Deleting %s' % instance['name'])
+ self._delete_instance(context, instance['id'])
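
A worked example of the eligibility check above, assuming reclaim_instance_interval=3600; only the timestamps are invented:

    import datetime

    queue_time = datetime.timedelta(seconds=3600)
    deleted_at = datetime.datetime(2011, 9, 21, 12, 0, 0)

    # Periodic task run at 12:59 -- not yet eligible:
    now = datetime.datetime(2011, 9, 21, 12, 59, 0)
    print (now - deleted_at) >= queue_time   # False

    # Periodic task run at 13:00 -- reclaimed on this pass:
    now = datetime.datetime(2011, 9, 21, 13, 0, 0)
    print (now - deleted_at) >= queue_time   # True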
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index e3315a542..b52140bf8 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -50,6 +50,8 @@ PAUSING = 'pausing'
UNPAUSING = 'unpausing'
SUSPENDING = 'suspending'
RESUMING = 'resuming'
+POWERING_OFF = 'powering-off'
+POWERING_ON = 'powering-on'
RESCUING = 'rescuing'
UNRESCUING = 'unrescuing'
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 6f16c1f09..f219bf7f4 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -32,6 +32,7 @@ SUSPENDED = 'suspended'
RESCUED = 'rescued'
DELETED = 'deleted'
STOPPED = 'stopped'
+SOFT_DELETE = 'soft-delete'
MIGRATING = 'migrating'
RESIZING = 'resizing'
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 44f4eb055..ca36523e4 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -86,6 +86,7 @@ class ExtensionControllerTest(test.TestCase):
self.flags(osapi_extensions_path=ext_path)
self.ext_list = [
"Createserverext",
+ "DeferredDelete",
"FlavorExtraSpecs",
"FlavorExtraData",
"Floating_ips",
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 49de9c854..b53e4cec6 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -70,10 +70,10 @@ class _IntegratedTestBase(test.TestCase):
self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
# set up services
- self.start_service('compute')
- self.start_service('volume')
- self.start_service('network')
- self.start_service('scheduler')
+ self.compute = self.start_service('compute')
+ self.volume = self.start_service('volume')
+ self.network = self.start_service('network')
+ self.scheduler = self.start_service('scheduler')
self._start_api_service()
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 2cf604d06..e9c79aa13 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -28,17 +28,25 @@ LOG = logging.getLogger('nova.tests.integrated')
class ServersTest(integrated_helpers._IntegratedTestBase):
- def _wait_for_creation(self, server):
- retries = 0
- while server['status'] == 'BUILD':
- time.sleep(1)
+ def _wait_for_state_change(self, server, status):
+ for i in xrange(0, 50):
server = self.api.get_server(server['id'])
print server
- retries = retries + 1
- if retries > 5:
+ if server['status'] != status:
break
+ time.sleep(.1)
+
return server
+ def _restart_compute_service(self, periodic_interval=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval:
+ self.compute = self.start_service(
+ 'compute', periodic_interval=periodic_interval)
+ else:
+ self.compute = self.start_service('compute')
+
def test_get_servers(self):
"""Simple check that listing servers works."""
servers = self.api.get_servers()
@@ -102,7 +110,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
server_ids = [server['id'] for server in servers]
self.assertTrue(created_server_id in server_ids)
- found_server = self._wait_for_creation(found_server)
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
@@ -114,12 +122,117 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
- def _delete_server(self, server_id):
+ def test_deferred_delete(self):
+ """Creates, deletes and waits for server to be reclaimed."""
+ self.flags(stub_network=True, reclaim_instance_interval=1)
+
+ # Run periodic tasks at a short interval to avoid waiting 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cannot restore unless instance is deleted
+ self.api.post_server_action(created_server_id, {'restore': {}})
+
+ # Check it's still active
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cannot forceDelete unless instance is deleted
+ self.api.post_server_action(created_server_id, {'forceDelete': {}})
+
+ # Check it's still active
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual('ACTIVE', found_server['status'])
+
# Delete the server
- self.api.delete_server(server_id)
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('DELETED', found_server['status'])
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def test_deferred_delete_restore(self):
+ """Creates, deletes and restores a server."""
+ self.flags(stub_network=True, reclaim_instance_interval=1)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('DELETED', found_server['status'])
+
+ # Restore server
+ self.api.post_server_action(created_server_id, {'restore': {}})
+
+ # Wait for server to become active again
+ found_server = self._wait_for_state_change(found_server, 'DELETED')
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ def test_deferred_delete_force(self):
+ """Creates, deletes and force deletes a server."""
+ self.flags(stub_network=True, reclaim_instance_interval=1)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('DELETED', found_server['status'])
+
+ # Force delete server
+ self.api.post_server_action(created_server_id, {'forceDelete': {}})
+
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
- for _retries in range(5):
+ for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
@@ -132,11 +245,16 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# TODO(justinsb): Mock doesn't yet do accurate state changes
#if found_server['status'] != 'deleting':
# break
- time.sleep(1)
+ time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
+ def _delete_server(self, server_id):
+ # Delete the server
+ self.api.delete_server(server_id)
+ self._wait_for_deletion(server_id)
+
def test_create_server_with_metadata(self):
"""Creates a server with metadata."""
@@ -194,7 +312,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
- created_server = self._wait_for_creation(created_server)
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata
post = {}
@@ -228,7 +346,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
- created_server = self._wait_for_creation(created_server)
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata
post = {}
@@ -274,7 +392,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
- created_server = self._wait_for_creation(created_server)
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata
post = {}
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index fc47d8d2d..7edb2cf1a 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -287,6 +287,14 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ raise NotImplementedError()
+
+ def power_on(self, instance):
+ """Power on the specified instance"""
+ raise NotImplementedError()
+
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 210b8fe65..988007bae 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1002,6 +1002,16 @@ class VMOps(object):
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self._shutdown(instance, vm_ref, hard=True)
+
+ def power_on(self, instance):
+ """Power on the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self._start(instance, vm_ref)
+
def poll_rescued_instances(self, timeout):
"""Look for expirable rescued instances.
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 7fc683a9f..79b02891d 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -250,6 +250,14 @@ class XenAPIConnection(driver.ComputeDriver):
"""Unrescue the specified instance"""
self._vmops.unrescue(instance, _callback)
+ def power_off(self, instance):
+ """Power off the specified instance"""
+ self._vmops.power_off(instance)
+
+ def power_on(self, instance):
+ """Power on the specified instance"""
+ self._vmops.power_on(instance)
+
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
self._vmops.poll_rescued_instances(timeout)