From 6b6633521c57422dfcdd1c0fbd8004c01bbe99cc Mon Sep 17 00:00:00 2001
From: Dan Wendlandt
Date: Mon, 12 Dec 2011 10:34:33 -0800
Subject: bug 899767: fix vif-plugging with live migration

Expose unplug_vifs method in virt layer, as the compute manager has to
explicitly call it during live migration, so that network devices are
torn down even if the disk image is not.

Change-Id: Iae88c72f4fd2c7c097c23ef9e5f8fd392052f73e
---
 nova/compute/manager.py         |    6 ++++++
 nova/tests/test_compute.py      |    2 ++
 nova/virt/driver.py             |    6 +++++-
 nova/virt/fake.py               |    6 +++++-
 nova/virt/hyperv.py             |    8 ++++++++
 nova/virt/libvirt/connection.py |   11 ++++++++---
 nova/virt/vmwareapi_conn.py     |    6 +++++-
 nova/virt/xenapi/vmops.py       |    9 ++++++---
 nova/virt/xenapi_conn.py        |    5 +++++
 9 files changed, 50 insertions(+), 9 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 8d9076b33..b85612cc6 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1735,6 +1735,12 @@ class ComputeManager(manager.SchedulerDependentManager):
         # must be deleted for preparing next block migration
         if block_migration:
             self.driver.destroy(instance_ref, network_info)
+        else:
+            # self.driver.destroy() usually performs vif unplugging
+            # but we must do it explicitly here when block_migration
+            # is false, as the network devices at the source must be
+            # torn down
+            self.driver.unplug_vifs(instance_ref, network_info)
 
         LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
                  % locals())
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index e6167dc51..4f49eb53b 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -1022,6 +1022,8 @@ class ComputeTestCase(BaseTestCase):
         rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
             {"method": "post_live_migration_at_destination",
              "args": {'instance_id': i_ref['id'], 'block_migration': False}})
+        self.mox.StubOutWithMock(self.compute.driver, 'unplug_vifs')
+        self.compute.driver.unplug_vifs(i_ref, [])
 
         # start test
         self.mox.ReplayAll()
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index f2e20bd3e..0342d394d 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -516,10 +516,14 @@ class ComputeDriver(object):
         raise NotImplementedError()
 
     def plug_vifs(self, instance, network_info):
-        """Plugs in VIFs to networks."""
+        """Plug VIFs into networks."""
         # TODO(Vek): Need to pass context in for access to auth_token
         raise NotImplementedError()
 
+    def unplug_vifs(self, instance, network_info):
+        """Unplug VIFs from networks."""
+        raise NotImplementedError()
+
     def update_host_status(self):
         """Refresh host stats"""
         raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 1a3b237ea..f6d93bc53 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -94,7 +94,11 @@ class FakeConnection(driver.ComputeDriver):
         return info_list
 
     def plug_vifs(self, instance, network_info):
-        """Plugin VIFs into networks."""
+        """Plug VIFs into networks."""
+        pass
+
+    def unplug_vifs(self, instance, network_info):
+        """Unplug VIFs from networks."""
         pass
 
     def spawn(self, context, instance, image_meta,
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 3f76ee4ed..294841992 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -516,3 +516,11 @@ class HyperVConnection(driver.ComputeDriver):
     def set_host_enabled(self, host, enabled):
         """Sets the specified host's ability to accept new instances."""
         pass
+
+    def plug_vifs(self, instance, network_info):
+        """Plug VIFs into networks."""
+        pass
+
+    def unplug_vifs(self, instance, network_info):
+        """Unplug VIFs from networks."""
+        pass
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index c8618d735..566db2d77 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -290,10 +290,15 @@ class LibvirtConnection(driver.ComputeDriver):
         return infos
 
     def plug_vifs(self, instance, network_info):
-        """Plugin VIFs into networks."""
+        """Plug VIFs into networks."""
         for (network, mapping) in network_info:
             self.vif_driver.plug(instance, network, mapping)
 
+    def unplug_vifs(self, instance, network_info):
+        """Unplug VIFs from networks."""
+        for (network, mapping) in network_info:
+            self.vif_driver.unplug(instance, network, mapping)
+
     def destroy(self, instance, network_info, block_device_info=None,
                 cleanup=True):
         instance_name = instance['name']
@@ -350,8 +355,7 @@ class LibvirtConnection(driver.ComputeDriver):
                       locals())
             raise
 
-        for (network, mapping) in network_info:
-            self.vif_driver.unplug(instance, network, mapping)
+        self.unplug_vifs(instance, network_info)
 
         def _wait_for_destroy():
             """Called at an interval until the VM is gone."""
@@ -545,6 +549,7 @@ class LibvirtConnection(driver.ComputeDriver):
         # better because we cannot ensure flushing dirty buffers
         # in the guest OS. But, in case of KVM, shutdown() does not work...
         self.destroy(instance, network_info, cleanup=False)
+        self.unplug_vifs(instance, network_info)
         self.plug_vifs(instance, network_info)
         self.firewall_driver.setup_basic_filtering(instance, network_info)
         self.firewall_driver.prepare_instance_filter(instance, network_info)
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index ed8e98cb8..a3898c90f 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -201,9 +201,13 @@ class VMWareESXConnection(driver.ComputeDriver):
         pass
 
     def plug_vifs(self, instance, network_info):
-        """Plugs in VIFs to networks."""
+        """Plug VIFs into networks."""
         self._vmops.plug_vifs(instance, network_info)
 
+    def unplug_vifs(self, instance, network_info):
+        """Unplug VIFs from networks."""
+        self._vmops.unplug_vifs(instance, network_info)
+
 
 class VMWareAPISession(object):
     """
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 375af567d..8971a83de 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1139,9 +1139,7 @@ class VMOps(object):
         self._destroy_kernel_ramdisk(instance, vm_ref)
         self._destroy_vm(instance, vm_ref)
 
-        if network_info:
-            for (network, mapping) in network_info:
-                self.vif_driver.unplug(instance, network, mapping)
+        self.unplug_vifs(instance, network_info)
 
     def pause(self, instance):
         """Pause VM instance."""
@@ -1459,6 +1457,11 @@ class VMOps(object):
         for (network, mapping) in network_info:
             self.vif_driver.plug(self._session, instance, network, mapping)
 
+    def unplug_vifs(self, instance, network_info):
+        if network_info:
+            for (network, mapping) in network_info:
+                self.vif_driver.unplug(instance, network, mapping)
+
     def reset_network(self, instance, vm_ref=None):
         """Creates uuid arg to pass to make_agent_call and calls it."""
         if not vm_ref:
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index f3699f5fb..6bd0a902d 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -297,8 +297,13 @@ class XenAPIConnection(driver.ComputeDriver):
         self._vmops.inject_network_info(instance, network_info)
 
     def plug_vifs(self, instance_ref, network_info):
+        """Plug VIFs into networks."""
         self._vmops.plug_vifs(instance_ref, network_info)
 
+    def unplug_vifs(self, instance_ref, network_info):
+        """Unplug VIFs from networks."""
+        self._vmops.unplug_vifs(instance_ref, network_info)
+
     def get_info(self, instance_name):
         """Return data about VM instance"""
         return self._vmops.get_info(instance_name)
-- 
cgit
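
Note for out-of-tree virt drivers tracking this change: unplug_vifs is the hook
that post_live_migration() now calls when block_migration is false, and it is
symmetrical to plug_vifs. Below is a minimal sketch of a driver-side
implementation, assuming a vif_driver object exposing plug()/unplug() as the
libvirt and xenapi drivers above do; ExampleConnection and its vif_driver
wiring are illustrative assumptions, not part of this change.

    # Illustrative only: mirrors the pattern this change adds to the
    # in-tree drivers; not code from the patch itself.
    from nova.virt import driver


    class ExampleConnection(driver.ComputeDriver):

        def __init__(self, vif_driver):
            # Assumes a VIF driver exposing plug()/unplug(), as the
            # libvirt driver's self.vif_driver does.
            self.vif_driver = vif_driver

        def plug_vifs(self, instance, network_info):
            """Plug VIFs into networks."""
            for (network, mapping) in network_info:
                self.vif_driver.plug(instance, network, mapping)

        def unplug_vifs(self, instance, network_info):
            """Unplug VIFs from networks."""
            # Tolerate an empty or None network_info, as the xenapi
            # implementation above does, so live migration of an
            # instance with no networks does not raise here.
            if network_info:
                for (network, mapping) in network_info:
                    self.vif_driver.unplug(instance, network, mapping)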