author     Ben Nemec <bnemec@us.ibm.com>   2013-03-19 03:14:10 +0000
committer  Ben Nemec <bnemec@us.ibm.com>   2013-05-03 16:20:01 +0000
commit     8c8e44cf37be9c6a9f418e923c9ff16df8619832 (patch)
tree       91edea9bef9b220b6e1c6e49bc419e7f021eead4
parent     a7a81b887384f51bc8c0218778601e6565dfba21 (diff)
Update hypervisor_hostname after live migration
After a live migration, the OS-EXT-SRV-ATTR:hypervisor_hostname attribute of
the instance is not updated correctly. This change adds a parameter to the
call to _instance_update that fixes the problem.

Fixes bug 1173376

Change-Id: I40f1ead2d93c7a9979a914e0fb96e3e5bb287b48
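As context for the diff below, here is a minimal, self-contained sketch of the pattern the patch applies in ComputeManager.post_live_migration_at_destination(): look up the destination compute node's hypervisor_hostname, fall back to None if that lookup raises NotFound, and always pass the result as the node value when the instance record is updated. The NotFound class, get_compute_info(), and update_instance_after_migration() below are simplified stand-ins invented for illustration, not the real Nova APIs.

# Illustrative sketch only: simplified stand-ins for the Nova helpers
# touched by this patch, showing the try/except/finally pattern it adds.


class NotFound(Exception):
    """Stand-in for nova.exception.NotFound."""


def get_compute_info(host, compute_nodes):
    """Stand-in for ComputeManager._get_compute_info()."""
    try:
        return compute_nodes[host]
    except KeyError:
        raise NotFound(host)


def update_instance_after_migration(instance, host, compute_nodes):
    """Update the instance record once a live migration has finished."""
    node_name = None
    try:
        compute_node = get_compute_info(host, compute_nodes)
        node_name = compute_node['hypervisor_hostname']
    except NotFound:
        # A failed lookup is logged and tolerated in the real code; the
        # update below still runs, with node left as None.
        pass
    finally:
        # Before this patch, no 'node' value was written here, so the
        # instance kept the source node's hypervisor_hostname.
        instance.update({'host': host, 'node': node_name})
    return instance


# Example: after migrating to 'dest-host', the record points at its node.
nodes = {'dest-host': {'hypervisor_hostname': 'dest-node.example.org'}}
inst = {'uuid': 'fake-uuid', 'host': 'src-host', 'node': 'src-node'}
updated = update_instance_after_migration(inst, 'dest-host', nodes)
assert updated['node'] == 'dest-node.example.org'

The finally block mirrors the patch's intent: the instance update must happen whether or not the compute-node lookup succeeds.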
-rwxr-xr-x  nova/compute/manager.py              16
-rw-r--r--  nova/tests/compute/test_compute.py   71
2 files changed, 59 insertions(+), 28 deletions(-)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b1f493a9e..70d88117d 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -3391,10 +3391,18 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                    block_migration, block_device_info)
         # Restore instance state
         current_power_state = self._get_power_state(context, instance)
-        instance = self._instance_update(context, instance['uuid'],
-                host=self.host, power_state=current_power_state,
-                vm_state=vm_states.ACTIVE, task_state=None,
-                expected_task_state=task_states.MIGRATING)
+        node_name = None
+        try:
+            compute_node = self._get_compute_info(context, self.host)
+            node_name = compute_node['hypervisor_hostname']
+        except exception.NotFound:
+            LOG.exception(_('Failed to get compute_info for %s') % self.host)
+        finally:
+            instance = self._instance_update(context, instance['uuid'],
+                    host=self.host, power_state=current_power_state,
+                    vm_state=vm_states.ACTIVE, task_state=None,
+                    expected_task_state=task_states.MIGRATING,
+                    node=node_name)
 
         # NOTE(vish): this is necessary to update dhcp
         self.network_api.setup_networks_on_host(context, instance, self.host)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index cd70e0cb2..f59b876e2 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -3643,51 +3643,74 @@ class ComputeTestCase(BaseTestCase):
         self.mox.ReplayAll()
         self.compute._post_live_migration(c, inst_ref, dest)
 
-    def test_post_live_migration_at_destination(self):
+    def _begin_post_live_migration_at_destination(self):
         self.mox.StubOutWithMock(self.compute.network_api,
                                  'setup_networks_on_host')
         self.mox.StubOutWithMock(self.compute.conductor_api,
                                  'network_migrate_instance_finish')
         self.mox.StubOutWithMock(self.compute, '_get_power_state')
-        self.mox.StubOutWithMock(self.compute, '_instance_update')
+        self.mox.StubOutWithMock(self.compute, '_get_compute_info')
         params = {'task_state': task_states.MIGRATING,
                   'power_state': power_state.PAUSED, }
-        instance = jsonutils.to_primitive(self._create_fake_instance(params))
+        self.instance = jsonutils.to_primitive(
+            self._create_fake_instance(params))
-        admin_ctxt = context.get_admin_context()
-        instance = db.instance_get_by_uuid(admin_ctxt, instance['uuid'])
+        self.admin_ctxt = context.get_admin_context()
+        self.instance = db.instance_get_by_uuid(self.admin_ctxt,
+                                                self.instance['uuid'])
-        self.compute.network_api.setup_networks_on_host(admin_ctxt, instance,
+        self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
+                                                         self.instance,
                                                          self.compute.host)
-        migration = {'source_compute': instance['host'],
+        migration = {'source_compute': self.instance['host'],
                      'dest_compute': self.compute.host, }
-        self.compute.conductor_api.network_migrate_instance_finish(admin_ctxt,
-                instance, migration)
+        self.compute.conductor_api.network_migrate_instance_finish(
+            self.admin_ctxt, self.instance, migration)
         fake_net_info = []
         fake_block_dev_info = {'foo': 'bar'}
-        self.compute.driver.post_live_migration_at_destination(admin_ctxt,
-                instance,
+        self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
+                self.instance,
                 fake_net_info,
                 False,
                 fake_block_dev_info)
-        self.compute._get_power_state(admin_ctxt, instance).AndReturn(
-            'fake_power_state')
+        self.compute._get_power_state(self.admin_ctxt,
+                                      self.instance).AndReturn(
+                                      'fake_power_state')
-        updated_instance = 'fake_updated_instance'
-        self.compute._instance_update(admin_ctxt, instance['uuid'],
-                host=self.compute.host,
-                power_state='fake_power_state',
-                vm_state=vm_states.ACTIVE,
-                task_state=None,
-                expected_task_state=task_states.MIGRATING).AndReturn(
-                    updated_instance)
-        self.compute.network_api.setup_networks_on_host(admin_ctxt,
-                updated_instance, self.compute.host)
+    def _finish_post_live_migration_at_destination(self):
+        self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
+                mox.IgnoreArg(), self.compute.host)
         self.mox.ReplayAll()
-        self.compute.post_live_migration_at_destination(admin_ctxt, instance)
+        self.compute.post_live_migration_at_destination(self.admin_ctxt,
+                                                        self.instance)
+
+        return self.compute.conductor_api.instance_get_by_uuid(self.admin_ctxt,
+            self.instance['uuid'])
+
+    def test_post_live_migration_at_destination_with_compute_info(self):
+        """The instance's node property should be updated correctly."""
+        self._begin_post_live_migration_at_destination()
+        hypervisor_hostname = 'fake_hypervisor_hostname'
+        fake_compute_info = {'hypervisor_hostname': hypervisor_hostname}
+        self.compute._get_compute_info(mox.IgnoreArg(),
+                                       mox.IgnoreArg()).AndReturn(
+                                       fake_compute_info)
+        updated = self._finish_post_live_migration_at_destination()
+        self.assertEqual(updated['node'], hypervisor_hostname)
+
+    def test_post_live_migration_at_destination_without_compute_info(self):
+        """The instance's node property should be set to None if we fail to
+        get compute_info.
+        """
+        self._begin_post_live_migration_at_destination()
+        self.compute._get_compute_info(mox.IgnoreArg(),
+                                       mox.IgnoreArg()).AndRaise(
+                                       exception.NotFound())
+        updated = self._finish_post_live_migration_at_destination()
+        self.assertIsNone(updated['node'])
 
     def test_run_kill_vm(self):
         # Detect when a vm is terminated behind the scenes.