author     Jenkins <jenkins@review.openstack.org>     2012-10-16 15:31:13 +0000
committer  Gerrit Code Review <review@openstack.org>  2012-10-16 15:31:13 +0000
commit     fc31bc4990438619bde8fe7eb0f15afc2bc1ab7e (patch)
tree       8d9bab3993713f9ab827dd9d9500df1e867c8218
parent     bff0ee67779eafb4872e1d8ef8d36c311fd2424b (diff)
parent     70275a6502d47d70fc006b84af1035138bc16d66 (diff)
download   nova-fc31bc4990438619bde8fe7eb0f15afc2bc1ab7e.tar.gz
           nova-fc31bc4990438619bde8fe7eb0f15afc2bc1ab7e.tar.xz
           nova-fc31bc4990438619bde8fe7eb0f15afc2bc1ab7e.zip
Merge "Remove db access for block devices on terminate_instance"
-rw-r--r--  nova/compute/api.py                  5
-rw-r--r--  nova/compute/manager.py             56
-rw-r--r--  nova/compute/rpcapi.py               8
-rw-r--r--  nova/tests/compute/test_compute.py  41
-rw-r--r--  nova/tests/compute/test_rpcapi.py    3
-rw-r--r--  nova/tests/fake_volume.py            2
6 files changed, 86 insertions, 29 deletions
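
This merge moves the block device mapping lookup out of the compute manager's terminate path: the API layer fetches the bdms once and passes them over RPC (bumping the compute RPC API from 2.3 to 2.4), and the manager keeps a database fallback for older callers. The following is a minimal sketch of that pattern, not Nova code; 'db' and 'rpcapi' are stand-in objects assumed to expose only the two calls that appear in the diff.

    # Sketch of the pass-the-bdms pattern introduced by this merge;
    # 'db' and 'rpcapi' are stand-ins, not the real Nova modules.

    def terminate_from_api(db, rpcapi, context, instance):
        # API side: look the BDMs up once and hand them to the manager
        # over RPC instead of letting the manager hit the database again.
        bdms = db.block_device_mapping_get_all_by_instance(
            context, instance["uuid"])
        rpcapi.terminate_instance(context, instance, bdms)

    def terminate_on_manager(db, context, instance, bdms=None):
        # Manager side: prefer the bdms supplied by the caller, falling
        # back to a database lookup so pre-2.4 callers keep working.
        if not bdms:
            bdms = db.block_device_mapping_get_all_by_instance(
                context, instance["uuid"])
        # Only bdms that reference a volume need detaching on shutdown.
        vol_bdms = [bdm for bdm in bdms if bdm["volume_id"]]
        return vol_bdms
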
diff --git a/nova/compute/api.py b/nova/compute/api.py
index f13fc55d4..b928ab50b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -911,11 +911,14 @@ class API(base.Base):
services = self.db.service_get_all_compute_by_host(
context.elevated(), instance['host'])
is_up = False
+ bdms = self.db.block_device_mapping_get_all_by_instance(
+ context, instance["uuid"])
#Note(jogo): db allows for multiple compute services per host
for service in services:
if utils.service_is_up(service):
is_up = True
- self.compute_rpcapi.terminate_instance(context, instance)
+ self.compute_rpcapi.terminate_instance(context, instance,
+ bdms)
break
if is_up == False:
# If compute node isn't up, just delete from DB
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7cd54bd75..23169375e 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -210,7 +210,7 @@ def _get_image_meta(context, image_ref):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.3'
+ RPC_API_VERSION = '2.4'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -769,11 +769,16 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_('Deallocating network for instance'), instance=instance)
self.network_api.deallocate_for_instance(context, instance)
- def _get_instance_volume_bdms(self, context, instance_uuid):
- bdms = self.db.block_device_mapping_get_all_by_instance(context,
- instance_uuid)
+ def _get_volume_bdms(self, bdms):
+ """Return only bdms that have a volume_id"""
return [bdm for bdm in bdms if bdm['volume_id']]
+ # NOTE(danms): Legacy interface for digging up volumes in the database
+ def _get_instance_volume_bdms(self, context, instance_uuid):
+ return self._get_volume_bdms(
+ self.db.block_device_mapping_get_all_by_instance(context,
+ instance_uuid))
+
def _get_instance_volume_bdm(self, context, instance_uuid, volume_id):
bdms = self._get_instance_volume_bdms(context, instance_uuid)
for bdm in bdms:
@@ -783,10 +788,15 @@ class ComputeManager(manager.SchedulerDependentManager):
if str(bdm['volume_id']) == str(volume_id):
return bdm
+ # NOTE(danms): This is a transitional interface until all the callers
+ # can provide their own bdms
def _get_instance_volume_block_device_info(self, context, instance_uuid,
bdms=None):
if bdms is None:
bdms = self._get_instance_volume_bdms(context, instance_uuid)
+ return self._get_volume_block_device_info(bdms)
+
+ def _get_volume_block_device_info(self, bdms):
block_device_mapping = []
for bdm in bdms:
try:
@@ -825,7 +835,7 @@ class ComputeManager(manager.SchedulerDependentManager):
admin_password, is_first_time, instance)
do_run_instance()
- def _shutdown_instance(self, context, instance):
+ def _shutdown_instance(self, context, instance, bdms):
"""Shutdown an instance on this host."""
context = context.elevated()
LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
@@ -843,12 +853,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._deallocate_network(context, instance)
# NOTE(vish) get bdms before destroying the instance
- bdms = self._get_instance_volume_bdms(context, instance['uuid'])
+ vol_bdms = self._get_volume_bdms(bdms)
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'], bdms=bdms)
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
- for bdm in bdms:
+ for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell nova-volume that we are done with it.
@@ -867,9 +877,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance, "shutdown.end")
- def _cleanup_volumes(self, context, instance_uuid):
- bdms = self.db.block_device_mapping_get_all_by_instance(context,
- instance_uuid)
+ def _cleanup_volumes(self, context, instance_uuid, bdms):
for bdm in bdms:
LOG.debug(_("terminating bdm %s") % bdm,
instance_uuid=instance_uuid)
@@ -878,12 +886,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self.volume_api.delete(context, volume)
# NOTE(vish): bdms will be deleted on instance destroy
- def _delete_instance(self, context, instance):
+ def _delete_instance(self, context, instance, bdms):
"""Delete an instance on this host."""
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "delete.start")
- self._shutdown_instance(context, instance)
+ self._shutdown_instance(context, instance, bdms)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It would
# be nice to let the user know somehow that the volume
@@ -893,7 +901,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# the first time and to only ignore the failure if the
# instance is already in ERROR.
try:
- self._cleanup_volumes(context, instance_uuid)
+ self._cleanup_volumes(context, instance_uuid, bdms)
except Exception as exc:
LOG.warn(_("Ignoring volume cleanup failure due to %s") % exc,
instance_uuid=instance_uuid)
@@ -912,7 +920,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
- def terminate_instance(self, context, instance):
+ def terminate_instance(self, context, instance, bdms=None):
"""Terminate an instance on this host. """
# Note(eglynn): we do not decorate this action with reverts_task_state
# because a failure during termination should leave the task state as
@@ -921,11 +929,14 @@ class ComputeManager(manager.SchedulerDependentManager):
# in_use count (see bug 1046236).
elevated = context.elevated()
+ # NOTE(danms): remove this compatibility in the future
+ if not bdms:
+ bdms = self._get_instance_volume_bdms(context, instance["uuid"])
@utils.synchronized(instance['uuid'])
- def do_terminate_instance(instance):
+ def do_terminate_instance(instance, bdms):
try:
- self._delete_instance(context, instance)
+ self._delete_instance(context, instance, bdms)
except exception.InstanceTerminationFailure as error:
msg = _('%s. Setting instance vm_state to ERROR')
LOG.error(msg % error, instance=instance)
@@ -933,7 +944,7 @@ class ComputeManager(manager.SchedulerDependentManager):
except exception.InstanceNotFound as e:
LOG.warn(e, instance=instance)
- do_terminate_instance(instance)
+ do_terminate_instance(instance, bdms)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@@ -2795,8 +2806,10 @@ class ComputeManager(manager.SchedulerDependentManager):
soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if soft_deleted and old_enough:
+ bdms = self.db.block_device_mapping_get_all_by_instance(
+ context, instance['uuid'])
LOG.info(_('Reclaiming deleted instance'), instance=instance)
- self._delete_instance(context, instance)
+ self._delete_instance(context, instance, bdms)
@manager.periodic_task
def update_available_resource(self, context):
@@ -2839,6 +2852,9 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
+ bdms = self.db.block_device_mapping_get_all_by_instance(
+ context, instance['uuid'])
+
if action == "log":
name = instance['name']
LOG.warning(_("Detected instance with name label "
@@ -2852,8 +2868,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"'%(name)s' which is marked as "
"DELETED but still present on host."),
locals(), instance=instance)
- self._shutdown_instance(context, instance)
- self._cleanup_volumes(context, instance['uuid'])
+ self._shutdown_instance(context, instance, bdms)
+ self._cleanup_volumes(context, instance['uuid'], bdms)
else:
raise Exception(_("Unrecognized value '%(action)s'"
" for FLAGS.running_deleted_"
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 560b1ab05..e0c4bae3b 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -131,6 +131,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.2 - Adds slave_info parameter to add_aggregate_host() and
remove_aggregate_host()
2.3 - Adds volume_id to reserve_block_device_name()
+ 2.4 - Add bdms to terminate_instance
'''
#
@@ -494,11 +495,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def terminate_instance(self, ctxt, instance):
+ def terminate_instance(self, ctxt, instance, bdms):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('terminate_instance',
- instance=instance_p),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p, bdms=bdms),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='2.4')
def unpause_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
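
Adding the bdms argument to terminate_instance is what forces the RPC API bump from 2.3 to 2.4: the client requests the new version explicitly so a manager that only speaks 2.3 is never sent the extra field. A rough sketch of that convention, using a hypothetical proxy class rather than the real nova.openstack.common.rpc.proxy API:

    # Rough sketch of the version-bump convention; ExampleComputeAPI and
    # its rpc client are hypothetical, not the real Nova RPC proxy.

    class ExampleComputeAPI(object):
        def __init__(self, rpc):
            # 'rpc' is assumed to expose cast(ctxt, msg, topic=..., version=...).
            self.rpc = rpc

        def terminate_instance(self, ctxt, instance, bdms):
            msg = {'method': 'terminate_instance',
                   'args': {'instance': instance, 'bdms': bdms}}
            # Request the 2.4 contract explicitly; a manager still on 2.3
            # never receives the new 'bdms' argument.
            self.rpc.cast(ctxt, msg, topic='compute', version='2.4')
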
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 76d0f6fb8..285fd7b22 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1349,7 +1349,8 @@ class ComputeTestCase(BaseTestCase):
fake_cleanup_volumes)
self.compute._delete_instance(self.context,
- instance=jsonutils.to_primitive(instance))
+ instance=jsonutils.to_primitive(instance),
+ bdms={})
def test_instance_termination_exception_sets_error(self):
"""Test that we handle InstanceTerminationFailure
@@ -1357,7 +1358,7 @@ class ComputeTestCase(BaseTestCase):
"""
instance = self._create_fake_instance()
- def fake_delete_instance(context, instance):
+ def fake_delete_instance(context, instance, bdms):
raise exception.InstanceTerminationFailure(reason='')
self.stubs.Set(self.compute, '_delete_instance',
@@ -2369,13 +2370,17 @@ class ComputeTestCase(BaseTestCase):
self.compute.host
).AndReturn([instance])
+ bdms = []
+
self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
self.compute._shutdown_instance(admin_context,
- instance).AndReturn(None)
+ instance,
+ bdms).AndReturn(None)
self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
self.compute._cleanup_volumes(admin_context,
- instance['uuid']).AndReturn(None)
+ instance['uuid'],
+ bdms).AndReturn(None)
self.mox.ReplayAll()
self.compute._cleanup_running_deleted_instances(admin_context)
@@ -4502,6 +4507,34 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
+ def test_terminate_with_volumes(self):
+ """Make sure that volumes get detached during instance termination"""
+ admin = context.get_admin_context()
+ instance = self._create_fake_instance()
+
+ # Create a volume and attach it to our instance
+ volume_id = db.volume_create(admin, {'size': 1})['id']
+ values = {'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': volume_id,
+ }
+ db.block_device_mapping_create(admin, values)
+ db.volume_attached(admin, volume_id, instance["uuid"],
+ "/dev/vdc")
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == volume_id
+ self.stubs.Set(nova.volume.api.API, "detach", fake_detach)
+
+ # Kill the instance and check that it was detached
+ self.compute.terminate_instance(admin, instance=instance)
+ self.assertTrue(result["detached"])
+
+ def test_inject_network_info(self):
instance = self._create_fake_instance()
self.compute_api.attach_volume(self.context, instance, 1, device=None)
self.assertTrue(called.get('fake_check_attach'))
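
The new test_terminate_with_volumes exercises the path end to end: it attaches a volume through a block device mapping, stubs the volume API's detach call to record whether it fired, and then terminates the instance. The same stub-and-record idea in isolation, as a sketch; 'stubs', 'compute' and 'volume_api_cls' are placeholders for the test harness objects used in the real test.

    # Stub-and-record sketch; harness object names are placeholders.

    def assert_volume_detached(stubs, compute, volume_api_cls, ctxt,
                               instance, volume_id):
        result = {"detached": False}

        def fake_detach(self, context, volume):
            # Record that detach was called for the volume we attached.
            result["detached"] = (volume["id"] == volume_id)

        stubs.Set(volume_api_cls, "detach", fake_detach)
        compute.terminate_instance(ctxt, instance=instance)
        assert result["detached"]
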
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index c5a5768c5..304f9adb3 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -325,7 +325,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
- instance=self.fake_instance)
+ instance=self.fake_instance, bdms=[],
+ version='2.4')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index 8093ff06f..37aaa83b4 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -163,6 +163,8 @@ class API(object):
if v['id'] == str(volume_id):
return v
+ raise exception.VolumeNotFound(volume_id=volume_id)
+
def get_all(self, context):
return self.volume_list