-rw-r--r--  nova/compute/manager.py            |  71
-rw-r--r--  nova/compute/rpcapi.py             |  12
-rw-r--r--  nova/tests/compute/test_compute.py | 249
-rw-r--r--  nova/tests/compute/test_rpcapi.py  |  12
-rw-r--r--  nova/virt/driver.py                |   1
-rw-r--r--  nova/virt/fake.py                  |   1
-rw-r--r--  nova/virt/libvirt/driver.py        |   1
7 files changed, 337 insertions(+), 10 deletions(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 5627687fc..df8f6b75c 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -299,7 +299,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.21'
+ RPC_API_VERSION = '2.22'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1299,7 +1299,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata=None,
- bdms=None):
+ bdms=None, recreate=False, on_shared_storage=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -1312,12 +1312,51 @@ class ComputeManager(manager.SchedulerDependentManager):
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
+ :param recreate: True if instance should be recreated with same disk
+ :param on_shared_storage: True if instance files on shared storage
"""
context = context.elevated()
+
+ orig_vm_state = instance['vm_state']
with self._error_out_instance_on_exception(context, instance['uuid']):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
+ if recreate:
+
+ if not self.driver.capabilities["supports_recreate"]:
+ # if the driver doesn't support recreate, fail the request
+ _msg = _('instance recreate is not implemented '
+ 'by this driver.')
+
+ LOG.warn(_msg, instance=instance)
+ self._instance_update(context,
+ instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.
+ REBUILDING)
+ raise exception.Invalid(_msg)
+
+ self._check_instance_not_already_created(context, instance)
+
+ # to cover the case where the admin expects the instance files to
+ # be on shared storage but they are not accessible, and vice versa
+ if on_shared_storage != self.driver.instance_on_disk(instance):
+ _msg = _("Invalid state of instance files on "
+ "shared storage")
+ raise exception.Invalid(_msg)
+
+ if on_shared_storage:
+ LOG.info(_('disk on shared storage, '
+ 'recreating using existing disk'))
+ else:
+ image_ref = orig_image_ref = instance['image_ref']
+ LOG.info(_("disk not on shared storage"
+ "rebuilding from: '%s'") % str(image_ref))
+
+ instance = self._instance_update(context, instance['uuid'],
+ host=self.host)
+
if image_ref:
image_meta = _get_image_meta(context, image_ref)
else:
@@ -1344,8 +1383,23 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state=task_states.REBUILDING,
expected_task_state=task_states.REBUILDING)
- network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, self._legacy_nw_info(network_info))
+ if recreate:
+ # Detaching volumes.
+ for bdm in self._get_instance_volume_bdms(context, instance):
+ volume = self.volume_api.get(context, bdm['volume_id'])
+
+ # We can't run volume disconnect on the source because
+ # the host is down. Just mark the volume as detached in
+ # the db; the zombie instance will be deleted from the
+ # source host during init_host when it comes back.
+ self.volume_api.detach(context.elevated(), volume)
+
+ self.network_api.setup_networks_on_host(context,
+ instance, self.host)
+ else:
+ network_info = self._get_instance_nw_info(context, instance)
+ self.driver.destroy(instance,
+ self._legacy_nw_info(network_info))
instance = self._instance_update(context,
instance['uuid'],
@@ -1388,6 +1442,15 @@ class ComputeManager(manager.SchedulerDependentManager):
REBUILD_SPAWNING,
launched_at=timeutils.utcnow())
+ LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
+ if orig_vm_state == vm_states.STOPPED:
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.STOPPING,
+ terminated_at=timeutils.utcnow(),
+ progress=0)
+ self.stop_instance(context, instance['uuid'])
+
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
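
The recreate branch above hinges on a consistency check between what the admin claims (on_shared_storage) and what the driver actually observes via instance_on_disk(). Below is a minimal, self-contained sketch of that check, not the Nova code itself; InvalidSharedStorage stands in for nova's exception.Invalid.

    # Standalone sketch of the shared-storage guard in rebuild_instance.
    class InvalidSharedStorage(Exception):
        pass

    def check_shared_storage(claimed_shared, driver_sees_disk):
        # The admin's claim must match what the driver observes,
        # otherwise the evacuate request is rejected.
        if claimed_shared != driver_sees_disk:
            raise InvalidSharedStorage(
                "Invalid state of instance files on shared storage")

    # Example: --on-shared-storage was passed but the disk is actually local.
    try:
        check_shared_storage(claimed_shared=True, driver_sees_disk=False)
    except InvalidSharedStorage as exc:
        print(exc)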
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 463bfe9e9..ae283283b 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -155,6 +155,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.19 - Add node to run_instance
2.20 - Add node to prep_resize
2.21 - Add migrate_data dict param to pre_live_migration()
+ 2.22 - Add recreate, on_shared_storage and host arguments to
+ rebuild_instance()
'''
#
@@ -393,16 +395,18 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='2.5')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
- image_ref, orig_image_ref, orig_sys_metadata, bdms):
+ image_ref, orig_image_ref, orig_sys_metadata, bdms,
+ recreate=False, on_shared_storage=False, host=None):
instance_p = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
self.cast(ctxt, self.make_msg('rebuild_instance',
instance=instance_p, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
- orig_sys_metadata=orig_sys_metadata, bdms=bdms_p),
- topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.18')
+ orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
+ recreate=recreate, on_shared_storage=on_shared_storage),
+ topic=_compute_topic(self.topic, ctxt, host, instance),
+ version='2.22')
def refresh_provider_fw_rules(self, ctxt, host):
self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
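
The interesting part of the 2.22 signature is the new host argument: _compute_topic() now receives the target host, so an evacuate cast lands on the destination compute node rather than the instance's current (dead) host. A hedged caller-side sketch follows, assuming a compute_rpcapi proxy like the one above; the evacuate() wrapper itself is hypothetical and not part of this patch.

    # Hypothetical wrapper showing how an API layer might drive the
    # 2.22 rebuild_instance signature for an evacuate request.
    def evacuate(compute_rpcapi, context, instance, target_host,
                 on_shared_storage):
        compute_rpcapi.rebuild_instance(
            context, instance=instance, new_pass=None,
            injected_files=None, image_ref=None, orig_image_ref=None,
            orig_sys_metadata=None, bdms=[],
            recreate=True, on_shared_storage=on_shared_storage,
            host=target_host)  # routes the cast to the target host's topic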
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 4337fdba9..0a5aea881 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -3253,6 +3253,255 @@ class ComputeTestCase(BaseTestCase):
for instance in instances:
db.instance_destroy(c, instance['uuid'])
+ def test_rebuild_on_host_updated_target(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'someotherhost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # make sure instance is updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], dest)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_with_wrong_shared_storage(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'srchost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=False)
+
+ # make sure instance was not updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], 'srchost')
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_with_volumes(self):
+ """Confirm evacuate scenario reconnects volumes."""
+
+ # creating testdata
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+
+ volume_id = 'fake'
+ values = {'instance_uuid': inst_ref['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': volume_id,
+ }
+
+ admin = context.get_admin_context()
+ db.block_device_mapping_create(admin, values)
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ def fake_volume_get(self, context, volume):
+ return {'id': volume_id}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == volume_id
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ # make sure volumes attach, detach are called
+ self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
+ self.compute.volume_api.detach(mox.IsA(admin), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
+ self.compute._setup_block_device_mapping(mox.IsA(admin),
+ mox.IsA(inst_ref),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ self.compute.rebuild_instance(admin, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(
+ admin, inst_uuid):
+ db.block_device_mapping_destroy(admin, bdms['id'])
+ db.instance_destroy(admin, inst_uuid)
+
+ def test_rebuild_on_host_with_shared_storage(self):
+ """Confirm evacuate scenario on shared storage."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), {},
+ mox.IgnoreArg(), None,
+ mox.IgnoreArg(), mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_without_shared_storage(self):
+ """Confirm evacuate scenario without shared storage
+ (rebuild from image)"""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ fake_image = {
+ 'id': 1,
+ 'name': 'fake_name',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'},
+ }
+
+ def set_shared_storage(instance):
+ return False
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref),
+ mox.IsA(fake_image), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass='newpass',
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=False)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_instance_exists(self):
+ """Rebuild if instance exists raise an exception"""
+
+ # creating testdata
+ c = self.context.elevated()
+ inst_ref = self._create_fake_instance({'host': 'fake_host_2'})
+ dest = self.compute.host
+
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance_uuid = instance['uuid']
+ dest = self.compute.host
+
+ self.compute.run_instance(self.context, instance=instance)
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
+
class ComputeAPITestCase(BaseTestCase):
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index b854c0288..a31d9a14b 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -244,8 +244,16 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('rebuild_instance', 'cast',
instance=self.fake_instance, new_pass='pass',
injected_files='files', image_ref='ref',
- orig_image_ref='orig_ref', bdms=[],
- orig_sys_metadata='orig_sys_metadata', version='2.18')
+ orig_image_ref='orig_ref', bdms=[], recreate=False,
+ on_shared_storage=False, orig_sys_metadata='orig_sys_metadata',
+ version='2.22')
+
+ def test_rebuild_instance_with_shared(self):
+ self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
+ injected_files='None', image_ref='None', orig_image_ref='None',
+ bdms=[], instance=self.fake_instance, host='new_host',
+ orig_sys_metadata=None, recreate=True, on_shared_storage=True,
+ version='2.22')
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 2c495e5e0..35834388e 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -109,6 +109,7 @@ class ComputeDriver(object):
capabilities = {
"has_imagecache": False,
+ "supports_recreate": False,
}
def __init__(self, virtapi):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f719b1a74..96e2d943f 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -76,6 +76,7 @@ class FakeInstance(object):
class FakeDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
"""Fake hypervisor driver"""
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index b79a2ba92..2390fc207 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -263,6 +263,7 @@ class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
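
The supports_recreate flag added to the three drivers above is what the manager's guard keys on. A small sketch of the read side, assuming a driver object exposing the same class-level capabilities dict; drivers that never define the key are treated as not supporting recreate. The helper below is illustrative only and not part of the patch.

    # Read the capability defensively so drivers without the new key
    # behave as "recreate unsupported".
    def supports_recreate(driver):
        return bool(getattr(driver, "capabilities", {}).get(
            "supports_recreate", False))

    class LegacyDriver(object):
        capabilities = {"has_imagecache": True}

    class RecreateCapableDriver(object):
        capabilities = {"has_imagecache": True, "supports_recreate": True}

    print(supports_recreate(LegacyDriver()))           # False
    print(supports_recreate(RecreateCapableDriver()))  # True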