| author | Renuka Apte <renuka.apte@citrix.com> | 2012-07-12 16:22:26 -0700 |
|---|---|---|
| committer | Renuka Apte <renuka.apte@citrix.com> | 2012-08-14 16:25:29 -0700 |
| commit | a7d0934a97ead360ecb378de7e29dbea513a6b30 | |
| tree | 93141a17267c124d032fb2ab3b75d7cdf92df9ac | |
| parent | 0eb53c035ab290c9574c1388bde59f7d05f64efc | |
| download | nova-a7d0934a97ead360ecb378de7e29dbea513a6b30.tar.gz, nova-a7d0934a97ead360ecb378de7e29dbea513a6b30.tar.xz, nova-a7d0934a97ead360ecb378de7e29dbea513a6b30.zip | |
xenapi: Support live migration without pools
Implement blueprint xenapi-live-block-migration.
Add ability to live migrate VMs to hosts that are not
a part of a host aggregate (block migration).
This requires XenServer 6.1 or later, or XCP 1.6 or later.
As of this change, we still do not support instances with
(nova/cinder) volumes attached. External kernels are also
not supported.
Change-Id: I5feb6756d78804aa37780a7d0cda1600f7060afe
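
In short, the destination check now returns implementation-specific data that the scheduler forwards to the source compute node as `migrate_data`. The sketch below illustrates that plumbing under stated assumptions: only the RPC method names and parameters come from this patch, while `FakeComputeRPCAPI`, `schedule_live_migration`, and the placeholder values are illustrative stand-ins for the real `nova.compute.rpcapi.ComputeAPI` and scheduler driver.

```python
# Hedged sketch of how migrate_data is threaded through after this change.
# FakeComputeRPCAPI stands in for nova.compute.rpcapi.ComputeAPI; everything
# except the method names and parameters is illustrative.

class FakeComputeRPCAPI(object):
    def check_can_live_migrate_destination(self, ctxt, instance, dest,
                                           block_migration, disk_over_commit):
        # The compute manager on `dest` now returns the destination check
        # data (e.g. the host.migrate_receive token) instead of discarding it.
        if block_migration:
            return {'xenops': '...', 'host': '...', 'master': '...',
                    'session_id': '...', 'SM': '...'}
        return None

    def live_migration(self, ctxt, instance, dest, block_migration, host,
                       migrate_data=None):
        print("cast live_migration to %s, migrate_data=%r"
              % (host, migrate_data))


def schedule_live_migration(rpcapi, ctxt, instance, dest,
                            block_migration, disk_over_commit):
    # Mirrors the nova/scheduler/driver.py hunk: capture the destination
    # check's return value and hand it to the source compute node.
    migrate_data = rpcapi.check_can_live_migrate_destination(
        ctxt, instance, dest, block_migration, disk_over_commit)
    rpcapi.live_migration(ctxt, instance=instance, dest=dest,
                          block_migration=block_migration,
                          host=instance['host'], migrate_data=migrate_data)


if __name__ == "__main__":
    schedule_live_migration(FakeComputeRPCAPI(), None,
                            {'host': 'source-host'}, 'dest-host',
                            block_migration=True, disk_over_commit=False)
```

The real scheduler still runs its existing source, destination, and common checks before this step; they are omitted from the sketch.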
| -rw-r--r-- | nova/compute/manager.py | 12 |
| -rw-r--r-- | nova/compute/rpcapi.py | 25 |
| -rw-r--r-- | nova/scheduler/driver.py | 7 |
| -rw-r--r-- | nova/tests/compute/test_rpcapi.py | 3 |
| -rw-r--r-- | nova/tests/scheduler/test_scheduler.py | 9 |
| -rw-r--r-- | nova/tests/test_xenapi.py | 141 |
| -rw-r--r-- | nova/tests/xenapi/stubs.py | 16 |
| -rw-r--r-- | nova/virt/driver.py | 5 |
| -rw-r--r-- | nova/virt/fake.py | 6 |
| -rw-r--r-- | nova/virt/libvirt/driver.py | 4 |
| -rw-r--r-- | nova/virt/xenapi/driver.py | 25 |
| -rw-r--r-- | nova/virt/xenapi/fake.py | 26 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 96 |
13 files changed, 322 insertions, 53 deletions
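
On the destination, the new `VMOps._migrate_receive` helper (see the vmops.py hunk at the bottom of the diff) looks up the network behind the management PIF and asks xapi for a migration token via `host.migrate_receive`. A rough standalone approximation of that helper follows; `FakeSession` and its canned return values are stand-ins for Nova's XenAPI session wrapper, used only to make the sketch runnable.

```python
# Approximation of VMOps._migrate_receive from the vmops.py hunk below.
# FakeSession mimics only the two xapi calls involved; the real code uses
# the session held by nova/virt/xenapi/driver.py.

class MigrationError(Exception):
    pass


class FakeSession(object):
    def get_xenapi_host(self):
        return 'OpaqueRef:dest-host'

    def call_xenapi(self, method, *args):
        if method == 'PIF.get_all_records_where':
            # One management PIF, as on a typical single-NIC host.
            return {'OpaqueRef:pif0': {'network': 'OpaqueRef:net0'}}
        if method == 'host.migrate_receive':
            # xapi returns the connection details the source will need.
            return {'xenops': '...', 'host': '...', 'master': '...',
                    'session_id': '...', 'SM': '...'}
        raise AssertionError(method)


def migrate_receive(session):
    dest_ref = session.get_xenapi_host()
    # Pick the network associated with the management PIF, equivalent to:
    #   uuid=`xe pif-list --minimal management=true`
    #   xe pif-param-get param-name=network-uuid uuid=$uuid
    pifs = session.call_xenapi('PIF.get_all_records_where',
                               'field "management" = "true"')
    if len(pifs) != 1:
        raise MigrationError('No suitable network for migrate')
    net_ref = list(pifs.values())[0]['network']
    return session.call_xenapi('host.migrate_receive', dest_ref, net_ref, {})


if __name__ == '__main__':
    print(migrate_receive(FakeSession()))
```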
diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 61703318f..1b8115531 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -247,7 +247,7 @@ def _get_image_meta(context, image_ref): class ComputeManager(manager.SchedulerDependentManager): """Manages the running instances from creation to destruction.""" - RPC_API_VERSION = '1.42' + RPC_API_VERSION = '1.43' def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" @@ -2181,6 +2181,9 @@ class ComputeManager(manager.SchedulerDependentManager): in) nova.db.sqlalchemy.models.Instance.Id :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit + + Returns a mapping of values required in case of block migration + and None otherwise. """ if not instance: instance = self.db.instance_get(ctxt, instance_id) @@ -2192,6 +2195,8 @@ class ComputeManager(manager.SchedulerDependentManager): finally: self.driver.check_can_live_migrate_destination_cleanup(ctxt, dest_check_data) + if dest_check_data and 'migrate_data' in dest_check_data: + return dest_check_data['migrate_data'] @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def check_can_live_migrate_source(self, ctxt, dest_check_data, @@ -2261,7 +2266,7 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.pre_block_migration(context, instance, disk) def live_migration(self, context, dest, block_migration=False, - instance=None, instance_id=None): + instance=None, instance_id=None, migrate_data=None): """Executing live migration. :param context: security context @@ -2269,6 +2274,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param instance: instance dict :param dest: destination host :param block_migration: if true, prepare for block migration + :param migrate_data: implementation specific params """ # Get instance for error handling. 
@@ -2306,7 +2312,7 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.live_migration(context, instance, dest, self._post_live_migration, self.rollback_live_migration, - block_migration) + block_migration, migrate_data) def _post_live_migration(self, ctxt, instance_ref, dest, block_migration=False): diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index 366eaaacf..c81d75356 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -123,6 +123,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): 1.42 - Add reservations arg to prep_resize(), resize_instance(), finish_resize(), confirm_resize(), revert_resize() and finish_revert_resize() + 1.43 - Add migrate_data to live_migration() ''' BASE_RPC_API_VERSION = '1.0' @@ -168,14 +169,16 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): version='1.36') def check_can_live_migrate_destination(self, ctxt, instance, destination, - block_migration, disk_over_commit): + block_migration, disk_over_commit): instance_p = jsonutils.to_primitive(instance) - self.call(ctxt, self.make_msg('check_can_live_migrate_destination', - instance=instance_p, - block_migration=block_migration, - disk_over_commit=disk_over_commit), - topic=_compute_topic(self.topic, ctxt, destination, None), - version='1.10') + return self.call(ctxt, + self.make_msg('check_can_live_migrate_destination', + instance=instance_p, + block_migration=block_migration, + disk_over_commit=disk_over_commit), + topic=_compute_topic(self.topic, + ctxt, destination, None), + version='1.10') def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): instance_p = jsonutils.to_primitive(instance) @@ -283,12 +286,14 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): topic=_compute_topic(self.topic, ctxt, None, instance), version='1.19') - def live_migration(self, ctxt, instance, dest, block_migration, host): + def live_migration(self, ctxt, instance, dest, block_migration, host, + migrate_data=None): instance_p = jsonutils.to_primitive(instance) self.cast(ctxt, self.make_msg('live_migration', instance=instance_p, - dest=dest, block_migration=block_migration), + dest=dest, block_migration=block_migration, + migrate_data=migrate_data), topic=_compute_topic(self.topic, ctxt, host, None), - version='1.40') + version='1.43') def pause_instance(self, ctxt, instance): instance_p = jsonutils.to_primitive(instance) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 5a013aff5..3a71a6a87 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -223,8 +223,8 @@ class Scheduler(object): self._live_migration_src_check(context, instance) self._live_migration_dest_check(context, instance, dest) self._live_migration_common_check(context, instance, dest) - self.compute_rpcapi.check_can_live_migrate_destination(context, - instance, dest, block_migration, disk_over_commit) + migrate_data = self.compute_rpcapi.check_can_live_migrate_destination( + context, instance, dest, block_migration, disk_over_commit) # Change instance_state values = {"task_state": task_states.MIGRATING} @@ -239,7 +239,8 @@ class Scheduler(object): src = instance['host'] self.compute_rpcapi.live_migration(context, host=src, instance=new_instance_ref, dest=dest, - block_migration=block_migration) + block_migration=block_migration, + migrate_data=migrate_data) def _live_migration_src_check(self, context, instance_ref): """Live migration check routine (for src host). 
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py index 619ec016d..e88cb2096 100644 --- a/nova/tests/compute/test_rpcapi.py +++ b/nova/tests/compute/test_rpcapi.py @@ -191,7 +191,8 @@ class ComputeRpcAPITestCase(test.TestCase): def test_live_migration(self): self._test_compute_api('live_migration', 'cast', instance=self.fake_instance, dest='dest', - block_migration='blockity_block', host='tsoh', version='1.40') + block_migration='blockity_block', host='tsoh', version='1.43', + migrate_data={}) def test_post_live_migration_at_destination(self): self._test_compute_api('post_live_migration_at_destination', 'call', diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index d21e206c0..c242bb62b 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -427,7 +427,8 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_common_check(self.context, instance, dest) self.driver.compute_rpcapi.check_can_live_migrate_destination( - self.context, instance, dest, block_migration, disk_over_commit) + self.context, instance, dest, block_migration, + disk_over_commit).AndReturn({}) db.instance_update_and_get_original(self.context, instance_uuid, {"task_state": task_states.MIGRATING}).AndReturn( (instance, instance)) @@ -436,7 +437,7 @@ class SchedulerTestCase(test.TestCase): compute_rpcapi.ComputeAPI.live_migration(self.context, host=instance['host'], instance=instance, dest=dest, - block_migration=block_migration) + block_migration=block_migration, migrate_data={}) self.mox.ReplayAll() self.driver.schedule_live_migration(self.context, @@ -494,7 +495,7 @@ class SchedulerTestCase(test.TestCase): 'block_migration': block_migration, 'disk_over_commit': disk_over_commit}, "version": "1.10"}, - None) + None).AndReturn({}) db.instance_update_and_get_original(self.context, instance_uuid, {"task_state": task_states.MIGRATING}).AndReturn( @@ -502,7 +503,7 @@ class SchedulerTestCase(test.TestCase): compute_rpcapi.ComputeAPI.live_migration(self.context, host=instance['host'], instance=instance, dest=dest, - block_migration=block_migration) + block_migration=block_migration, migrate_data={}) self.mox.ReplayAll() result = self.driver.schedule_live_migration(self.context, diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 321065657..92d3000b3 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -2116,14 +2116,17 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): 'Dom0IptablesFirewallDriver', host='host') db_fakes.stub_out_db_instance_api(self.stubs) - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) self.context = context.get_admin_context() - self.conn = xenapi_conn.XenAPIDriver(False) + xenapi_fake.create_local_pifs() def test_live_migration_calls_vmops(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + def fake_live_migrate(context, instance_ref, dest, post_method, - recover_method, block_migration): + recover_method, block_migration, migrate_data): fake_live_migrate.called = True + self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate) self.conn.live_migration(None, None, None, None, None) @@ -2131,18 +2134,78 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): def test_pre_live_migration(self): # ensure method is present + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) 
self.conn.pre_live_migration(None, None, None, None) def test_post_live_migration_at_destination(self): # ensure method is present + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) self.conn.post_live_migration_at_destination(None, None, None, None) - def test_check_can_live_migrate_raises_on_block_migrate(self): - self.assertRaises(NotImplementedError, + def test_check_can_live_migrate_destination_with_block_migration(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + expected = {'block_migration': True, + 'migrate_data': {'xenops': '', + 'host': '', + 'master': '', + 'session_id': '', + 'SM': ''} + } + fake_data = self.conn.check_can_live_migrate_destination(self.context, + {'host': 'host'}, True, False) + self.assertEqual(expected.keys(), fake_data.keys()) + self.assertEqual(expected['migrate_data'].keys(), + fake_data['migrate_data'].keys()) + + def test_check_can_live_migrate_destination_block_migration_fails(self): + stubs.stubout_session(self.stubs, + stubs.FakeSessionForFailedMigrateTests) + self.conn = xenapi_conn.XenAPIDriver(False) + self.assertRaises(exception.MigrationError, self.conn.check_can_live_migrate_destination, - None, None, True, None) + self.context, {'host': 'host'}, True, False) + + def test_check_can_live_migrate_source_with_block_migrate(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + dest_check_data = {'block_migration': True, + 'migrate_data': {}} + self.assertNotRaises(None, + self.conn.check_can_live_migrate_source, + self.context, + {'host': 'host'}, + dest_check_data) + + def test_check_can_live_migrate_source_with_block_migrate_fails(self): + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + stubs.stubout_session(self.stubs, + stubs.FakeSessionForFailedMigrateTests) + self.conn = xenapi_conn.XenAPIDriver(False) + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + dest_check_data = {'block_migration': True, + 'migrate_data': {}} + self.assertRaises(exception.MigrationError, + self.conn.check_can_live_migrate_source, + self.context, + {'host': 'host'}, + dest_check_data) def test_check_can_live_migrate_works(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + class fake_aggregate: def __init__(self): self.metadetails = {"host": "test_host_uuid"} @@ -2157,6 +2220,9 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): {'host': 'host'}, False, False) def test_check_can_live_migrate_fails(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + class fake_aggregate: def __init__(self): self.metadetails = {"dest_other": "test_host_uuid"} @@ -2172,6 +2238,9 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): self.context, {'host': 'host'}, None, None) def test_live_migration(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + def fake_get_vm_opaque_ref(instance): return "fake_vm" self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", @@ -2191,6 +2260,9 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): self.assertTrue(post_method.called, "post_method.called") def 
test_live_migration_on_failure(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + def fake_get_vm_opaque_ref(instance): return "fake_vm" self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", @@ -2214,6 +2286,63 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase): self.conn, None, None, None, recover_method) self.assertTrue(recover_method.called, "recover_method.called") + def test_live_migration_with_block_migration(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + def post_method(context, instance, destination_hostname, + block_migration): + post_method.called = True + + # pass block_migration = True and migrate data + migrate_data = {"test": "data"} + self.conn.live_migration(self.conn, None, None, post_method, None, + True, migrate_data) + self.assertTrue(post_method.called, "post_method.called") + + def test_live_migration_with_block_migration_raises_invalid_param(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.conn = xenapi_conn.XenAPIDriver(False) + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + def recover_method(context, instance, destination_hostname, + block_migration): + recover_method.called = True + # pass block_migration = True and no migrate data + self.assertRaises(exception.InvalidParameterValue, + self.conn.live_migration, self.conn, + None, None, None, recover_method, True, None) + self.assertTrue(recover_method.called, "recover_method.called") + + def test_live_migration_with_block_migration_fails_migrate_send(self): + stubs.stubout_session(self.stubs, + stubs.FakeSessionForFailedMigrateTests) + self.conn = xenapi_conn.XenAPIDriver(False) + + def fake_get_vm_opaque_ref(instance): + return "fake_vm" + self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref", + fake_get_vm_opaque_ref) + + def recover_method(context, instance, destination_hostname, + block_migration): + recover_method.called = True + # pass block_migration = True and migrate data + migrate_data = {"test": "data"} + self.assertRaises(exception.MigrationError, + self.conn.live_migration, self.conn, + None, None, None, recover_method, True, migrate_data) + self.assertTrue(recover_method.called, "recover_method.called") + class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase): def setUp(self): diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 4813bfa57..d9c1d510d 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -356,6 +356,22 @@ def stub_out_migration_methods(stubs): stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral) +class FakeSessionForFailedMigrateTests(FakeSessionForVMTests): + def __init__(self, uri): + super(FakeSessionForFailedMigrateTests, self).__init__(uri) + + def VM_assert_can_migrate(self, session, vmref, migrate_data, + live, vdi_map, vif_map, options): + raise fake.Failure("XenAPI VM.assert_can_migrate failed") + + def host_migrate_receive(self, session, hostref, networkref, options): + raise fake.Failure("XenAPI host.migrate_receive failed") + + def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map, + vif_map, options): + raise fake.Failure("XenAPI VM.migrate_send failed") + + class 
XenAPITestBase(test.TestCase): def setUp(self): super(XenAPITestBase, self).setUp() diff --git a/nova/virt/driver.py b/nova/virt/driver.py index dafb83568..53b37ada6 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -337,7 +337,8 @@ class ComputeDriver(object): raise NotImplementedError() def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method, block_migration=False): + post_method, recover_method, block_migration=False, + migrate_data=None): """Live migration of an instance to another host. :params ctxt: security context @@ -352,6 +353,8 @@ class ComputeDriver(object): recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :params block_migration: if true, migrate VM disk. + :params migrate_data: implementation specific params. + """ raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 823a6e310..2d4672876 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -257,12 +257,11 @@ class FakeDriver(driver.ComputeDriver): raise NotImplementedError('This method is supported only by libvirt.') def get_instance_disk_info(self, instance_name): - """This method is supported only by libvirt.""" return def live_migration(self, context, instance_ref, dest, - post_method, recover_method, block_migration=False): - """This method is supported only by libvirt.""" + post_method, recover_method, block_migration=False, + migrate_data=None): return def finish_migration(self, context, migration, instance, disk_info, @@ -274,7 +273,6 @@ class FakeDriver(driver.ComputeDriver): def pre_live_migration(self, context, instance_ref, block_device_info, network_info): - """This method is supported only by libvirt.""" return def unfilter_instance(self, instance_ref, network_info): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index e521fca59..2ca5c2edf 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2399,7 +2399,8 @@ class LibvirtDriver(driver.ComputeDriver): self.firewall_driver.filter_defer_apply_off() def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method, block_migration=False): + post_method, recover_method, block_migration=False, + migrate_data=None): """Spawning live_migration operation for distributing high-load. :params ctxt: security context @@ -2415,6 +2416,7 @@ class LibvirtDriver(driver.ComputeDriver): recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :params block_migration: if true, do block migration. 
+ :params migrate_data: implementation specific params """ diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 6af06a283..2f472fc7b 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -430,8 +430,10 @@ class XenAPIDriver(driver.ComputeDriver): :param disk_over_commit: if true, allow disk over commit """ - self._vmops.check_can_live_migrate_destination(ctxt, instance_ref, - block_migration, disk_over_commit) + return self._vmops.check_can_live_migrate_destination(ctxt, + instance_ref, + block_migration, + disk_over_commit) def check_can_live_migrate_destination_cleanup(self, ctxt, dest_check_data): @@ -452,11 +454,25 @@ class XenAPIDriver(driver.ComputeDriver): :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination + includes the block_migration flag """ + self._vmops.check_can_live_migrate_source(ctxt, instance_ref, + dest_check_data) + + def get_instance_disk_info(self, instance_name): + """Used by libvirt for live migration. We rely on xenapi + checks to do this for us.""" + pass + + def pre_block_migration(self, ctxt, instance_ref, disk_info_json): + """Used by libvirt for live migration. We rely on xenapi + checks to do this for us. May be used in the future to + populate the vdi/vif maps""" pass def live_migration(self, ctxt, instance_ref, dest, - post_method, recover_method, block_migration=False): + post_method, recover_method, block_migration=False, + migrate_data=None): """Performs the live migration of the specified instance. :params ctxt: security context @@ -471,9 +487,10 @@ class XenAPIDriver(driver.ComputeDriver): recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :params block_migration: if true, migrate VM disk. + :params migrate_data: implementation specific params """ self._vmops.live_migrate(ctxt, instance_ref, dest, post_method, - recover_method, block_migration) + recover_method, block_migration, migrate_data) def pre_live_migration(self, context, instance_ref, block_device_info, network_info): diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index cbfa72413..0fcd30ebe 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -292,7 +292,8 @@ def _create_local_pif(host_ref): 'physical': True, 'VLAN': -1, 'device': 'fake0', - 'host_uuid': host_ref}) + 'host_uuid': host_ref, + 'network': ''}) return pif_ref @@ -491,6 +492,10 @@ class SessionBase(object): def VM_pool_migrate(self, _1, vm_ref, host_ref, options): pass + def VM_migrate_send(self, vmref, migrate_data, islive, vdi_map, + vif_map, options): + pass + def VDI_remove_from_other_config(self, _1, vdi_ref, key): db_ref = _db_content['VDI'][vdi_ref] if not 'other_config' in db_ref: @@ -599,6 +604,25 @@ class SessionBase(object): def pool_set_name_label(self, session, pool_ref, name): pass + def host_migrate_receive(self, session, destref, nwref, options): + # The dictionary below represents the true keys, as + # returned by a destination host, but fake values. + return {'xenops': 'http://localhost/services/xenops?' + 'session_id=OpaqueRef:81d00b97-b205-b34d-924e-6f9597854cc0', + 'host': 'OpaqueRef:5e4a3dd1-b71c-74ba-bbc6-58ee9ff6a889', + 'master': 'http://localhost/', + 'session_id': 'OpaqueRef:81d00b97-b205-b34d-924e-6f9597854cc0', + 'SM': 'http://localhost/services/SM?' 
+ 'session_id=OpaqueRef:81d00b97-b205-b34d-924e-6f9597854cc0'} + + def VM_assert_can_migrate(self, session, vmref, migrate_data, live, + vdi_map, vif_map, options): + pass + + def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map, + vif_map, options): + pass + def network_get_all_records_where(self, _1, filter): return self.xenapi.network.get_all_records() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index dbff55d1f..c74c19773 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -542,6 +542,7 @@ class VMOps(object): 'weight', str(vcpu_weight)) def _get_vm_opaque_ref(self, instance): + """Get xapi OpaqueRef from a db record.""" vm_ref = vm_utils.lookup(self._session, instance['name']) if vm_ref is None: raise exception.NotFound(_('Could not find VM with name %s') % @@ -1482,6 +1483,30 @@ class VMOps(object): host_uuid = self._get_host_uuid_from_aggregate(context, hostname) return self._session.call_xenapi("host.get_by_uuid", host_uuid) + def _migrate_receive(self, ctxt): + destref = self._session.get_xenapi_host() + # Get the network to for migrate. + # This is the one associated with the pif marked management. From cli: + # uuid=`xe pif-list --minimal management=true` + # xe pif-param-get param-name=network-uuid uuid=$uuid + expr = 'field "management" = "true"' + pifs = self._session.call_xenapi('PIF.get_all_records_where', + expr) + if len(pifs) != 1: + raise exception.MigrationError('No suitable network for migrate') + + nwref = pifs[pifs.keys()[0]]['network'] + try: + options = {} + migrate_data = self._session.call_xenapi("host.migrate_receive", + destref, + nwref, + options) + except self._session.XenAPI.Failure as exc: + LOG.exception(exc) + raise exception.MigrationError(_('Migrate Receive failed')) + return migrate_data + def check_can_live_migrate_destination(self, ctxt, instance_ref, block_migration=False, disk_over_commit=False): @@ -1494,30 +1519,71 @@ class VMOps(object): """ if block_migration: - #TODO(johngarbutt): XenServer feature coming soon fixes this - raise NotImplementedError() + migrate_data = self._migrate_receive(ctxt) + dest_check_data = {} + dest_check_data["block_migration"] = block_migration + dest_check_data["migrate_data"] = migrate_data + return dest_check_data else: src = instance_ref['host'] self._ensure_host_in_aggregate(ctxt, src) # TODO(johngarbutt) we currently assume # instance is on a SR shared with other destination # block migration work will be able to resolve this + return None - def live_migrate(self, context, instance, destination_hostname, - post_method, recover_method, block_migration): - if block_migration: - #TODO(johngarbutt): see above - raise NotImplementedError() - else: + def check_can_live_migrate_source(self, ctxt, instance_ref, + dest_check_data): + """ Check if it is possible to execute live migration + on the source side. 
+ :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance object + :param dest_check_data: data returned by the check on the + destination, includes block_migration flag + + """ + if dest_check_data and 'migrate_data' in dest_check_data: + vmref = self._get_vm_opaque_ref(instance_ref) + migrate_data = dest_check_data['migrate_data'] try: - vm_ref = self._get_vm_opaque_ref(instance) + vdi_map = {} + vif_map = {} + options = {} + self._session.call_xenapi("VM.assert_can_migrate", vmref, + migrate_data, True, vdi_map, vif_map, + options) + except self._session.XenAPI.Failure as exc: + LOG.exception(exc) + raise exception.MigrationError(_('VM.assert_can_migrate' + 'failed')) + + def live_migrate(self, context, instance, destination_hostname, + post_method, recover_method, block_migration, + migrate_data=None): + try: + vm_ref = self._get_vm_opaque_ref(instance) + if block_migration: + if not migrate_data: + raise exception.InvalidParameterValue('Block Migration ' + 'requires migrate data from destination') + try: + vdi_map = {} + vif_map = {} + options = {} + self._session.call_xenapi("VM.migrate_send", vm_ref, + migrate_data, True, + vdi_map, vif_map, options) + except self._session.XenAPI.Failure as exc: + LOG.exception(exc) + raise exception.MigrationError(_('Migrate Send failed')) + else: host_ref = self._get_host_opaque_ref(context, destination_hostname) self._session.call_xenapi("VM.pool_migrate", vm_ref, host_ref, {}) - post_method(context, instance, destination_hostname, - block_migration) - except Exception: - with excutils.save_and_reraise_exception(): - recover_method(context, instance, destination_hostname, - block_migration) + post_method(context, instance, destination_hostname, + block_migration) + except Exception: + with excutils.save_and_reraise_exception(): + recover_method(context, instance, destination_hostname, + block_migration) |
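
Outside of Nova, the same xapi primitives this patch drives can be exercised directly with the XenAPI Python bindings. This is only a rough sketch under several assumptions not taken from the patch: reachable XenServer 6.1+/XCP 1.6+ hosts at the placeholder URLs, root credentials, a VM named "my-instance", and a single host with a single management PIF on the destination.

```python
# Standalone sketch of the underlying XenAPI calls (storage motion).
# Host URLs, credentials, the VM name, and the single-host/single-PIF
# assumptions are all placeholders; error handling is omitted for brevity.
import XenAPI

dest = XenAPI.Session('https://dest.example.com')
dest.login_with_password('root', 'secret')
src = XenAPI.Session('https://source.example.com')
src.login_with_password('root', 'secret')

try:
    # Destination side: pick the management network and request a receive token
    # (this token is what Nova carries around as migrate_data).
    dest_host = dest.xenapi.host.get_all()[0]          # assumes one host
    pif_refs = dest.xenapi.PIF.get_all_records_where(
        'field "management" = "true"')
    net_ref = list(pif_refs.values())[0]['network']    # assumes one mgmt PIF
    token = dest.xenapi.host.migrate_receive(dest_host, net_ref, {})

    # Source side: assert the migration is possible, then send the VM live,
    # with empty VDI/VIF maps mirroring what the Nova code passes here.
    vm_ref = src.xenapi.VM.get_by_name_label('my-instance')[0]
    src.xenapi.VM.assert_can_migrate(vm_ref, token, True, {}, {}, {})
    src.xenapi.VM.migrate_send(vm_ref, token, True, {}, {}, {})
finally:
    src.xenapi.session.logout()
    dest.xenapi.session.logout()
```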
