 nova/tests/test_hypervapi.py           | 219
 nova/virt/hyperv/basevolumeutils.py    |  41
 nova/virt/hyperv/livemigrationops.py   |  14
 nova/virt/hyperv/livemigrationutils.py | 160
 nova/virt/hyperv/vmutils.py            |  28
 nova/virt/hyperv/volumeops.py          |  68
 nova/virt/hyperv/volumeutilsv2.py      |  31
 7 files changed, 419 insertions(+), 142 deletions(-)
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index c6d75aea1..0ddfe080d 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -42,6 +42,7 @@ from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
from nova.virt import configdrive
+from nova.virt import driver
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
@@ -51,6 +52,7 @@ from nova.virt.hyperv import networkutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
@@ -88,10 +90,6 @@ class HyperVAPITestCase(test.TestCase):
self.flags(instances_path=r'C:\Hyper-V\test\instances',
network_api_class='nova.network.quantumv2.api.API')
- self.flags(vswitch_name='external',
- force_volumeutils_v1=True,
- group='hyperv')
-
self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
@@ -118,6 +116,14 @@ class HyperVAPITestCase(test.TestCase):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
+ def fake_vmutils__init__(self, host='.'):
+ pass
+ vmutils.VMUtils.__init__ = fake_vmutils__init__
+
+ def fake_get_volume_utils(self):
+ return volumeutils.VolumeUtils()
+ volumeops.VolumeOps._get_volume_utils = fake_get_volume_utils
+
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
@@ -141,7 +147,7 @@ class HyperVAPITestCase(test.TestCase):
self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_iscsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks_count')
self._mox.StubOutWithMock(vmutils.VMUtils,
@@ -150,6 +156,8 @@ class HyperVAPITestCase(test.TestCase):
'get_mounted_disk_by_drive_number')
self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_controller_volume_paths')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
@@ -183,6 +191,8 @@ class HyperVAPITestCase(test.TestCase):
'get_session_id_from_mounted_disk')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_device_number_for_target')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_from_disk_path')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'login_storage_target')
@@ -523,16 +533,21 @@ class HyperVAPITestCase(test.TestCase):
self._conn.destroy(self._instance_data, None)
self._mox.VerifyAll()
- def test_live_migration(self):
- self._test_live_migration(False)
+ def test_live_migration_without_volumes(self):
+ self._test_live_migration()
+
+ def test_live_migration_with_volumes(self):
+ self._test_live_migration(with_volumes=True)
def test_live_migration_with_target_failure(self):
- self._test_live_migration(True)
+ self._test_live_migration(test_failure=True)
- def _test_live_migration(self, test_failure):
+ def _test_live_migration(self, test_failure=False,
+ with_volumes=False):
dest_server = 'fake_server'
instance_data = self._get_instance_data()
+ instance_name = instance_data['name']
fake_post_method = self._mox.CreateMockAnything()
if not test_failure:
@@ -544,10 +559,27 @@ class HyperVAPITestCase(test.TestCase):
fake_recover_method(self._context, instance_data, dest_server,
False)
+ fake_ide_controller_path = 'fakeide'
+ fake_scsi_controller_path = 'fakescsi'
+
+ if with_volumes:
+ fake_scsi_disk_path = 'fake_scsi_disk_path'
+ fake_target_iqn = 'fake_target_iqn'
+ fake_target_lun = 1
+ fake_scsi_paths = {0: fake_scsi_disk_path}
+ else:
+ fake_scsi_paths = {}
+
m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
instance_data['name'], dest_server)
if test_failure:
- m.AndRaise(Exception('Simulated failure'))
+ m.AndRaise(vmutils.HyperVException('Simulated failure'))
+
+ if with_volumes:
+ m.AndReturn([(fake_target_iqn, fake_target_lun)])
+ volumeutils.VolumeUtils.logout_storage_target(fake_target_iqn)
+ else:
+ m.AndReturn([])
self._mox.ReplayAll()
try:
@@ -555,19 +587,22 @@ class HyperVAPITestCase(test.TestCase):
dest_server, fake_post_method,
fake_recover_method)
exception_raised = False
- except Exception:
+ except vmutils.HyperVException:
exception_raised = True
self.assertTrue(not test_failure ^ exception_raised)
self._mox.VerifyAll()
def test_pre_live_migration_cow_image(self):
- self._test_pre_live_migration(True)
+ self._test_pre_live_migration(True, False)
def test_pre_live_migration_no_cow_image(self):
- self._test_pre_live_migration(False)
+ self._test_pre_live_migration(False, False)
- def _test_pre_live_migration(self, cow):
+ def test_pre_live_migration_with_volumes(self):
+ self._test_pre_live_migration(False, True)
+
+ def _test_pre_live_migration(self, cow, with_volumes):
self.flags(use_cow_images=cow)
instance_data = self._get_instance_data()
@@ -591,9 +626,29 @@ class HyperVAPITestCase(test.TestCase):
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
+ if with_volumes:
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+ else:
+ block_device_info = None
+
self._mox.ReplayAll()
self._conn.pre_live_migration(self._context, instance,
- None, network_info)
+ block_device_info, network_info)
self._mox.VerifyAll()
if cow:
@@ -734,7 +789,8 @@ class HyperVAPITestCase(test.TestCase):
return image_path == self._fetched_image
def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
- boot_from_volume=False):
+ boot_from_volume=False,
+ block_device_info=None):
vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
mox.IsA(int), mox.IsA(bool))
@@ -750,6 +806,16 @@ class HyperVAPITestCase(test.TestCase):
m = vmutils.VMUtils.create_scsi_controller(func)
m.InAnyOrder()
+ if boot_from_volume:
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
+ target_lun, target_portal, True)
+
vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name), mox.IsA(str),
mox.IsA(str)).InAnyOrder()
@@ -787,7 +853,8 @@ class HyperVAPITestCase(test.TestCase):
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
self._setup_create_instance_mocks(setup_vif_mocks_func,
- boot_from_volume)
+ boot_from_volume,
+ block_device_info)
# TODO(alexpilotti) Based on where the exception is thrown
# some of the above mock calls need to be skipped
@@ -818,41 +885,57 @@ class HyperVAPITestCase(test.TestCase):
vhd_path = pathutils.PathUtils().get_vhd_path(self._test_vm_name)
self.assertEquals(vhd_path, self._instance_ide_disks[0])
- def test_attach_volume(self):
- instance_data = self._get_instance_data()
- instance_name = instance_data['name']
+ def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
- connection_info = db_fakes.get_fake_volume_info_data(
- self._volume_target_portal, self._volume_id)
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
- mount_point = '/dev/sdc'
+ def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
+ fake_mounted_disk, fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
volumeutils.VolumeUtils.login_storage_target(target_lun,
target_iqn,
target_portal)
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
+ def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
+ target_portal=None, boot_from_volume=False):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
- fake_free_slot = 1
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndReturn(fake_device_number)
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
- m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
- fake_device_number)
- m.AndReturn(fake_mounted_disk)
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
- m = vmutils.VMUtils.get_vm_iscsi_controller(instance_name)
- m.AndReturn(fake_controller_path)
+ if boot_from_volume:
+ m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
+ m.AndReturn(fake_controller_path)
+ fake_free_slot = 0
+ else:
+ m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
+ m.AndReturn(fake_controller_path)
- m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
- m.AndReturn(fake_free_slot)
+ fake_free_slot = 1
+ m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
+ m.AndReturn(fake_free_slot)
m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
fake_controller_path,
@@ -860,15 +943,8 @@ class HyperVAPITestCase(test.TestCase):
fake_mounted_disk)
m.WithSideEffects(self._add_volume_disk)
- self._mox.ReplayAll()
- self._conn.attach_volume(connection_info, instance_data, mount_point)
- self._mox.VerifyAll()
-
- self.assertEquals(len(self._instance_volume_disks), 1)
-
- def test_detach_volume(self):
+ def test_attach_volume(self):
instance_data = self._get_instance_data()
- instance_name = instance_data['name']
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
@@ -878,6 +954,18 @@ class HyperVAPITestCase(test.TestCase):
target_portal = data['target_portal']
mount_point = '/dev/sdc'
+ self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
+ target_portal)
+
+ self._mox.ReplayAll()
+ self._conn.attach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
+
+ self.assertEquals(len(self._instance_volume_disks), 1)
+
+ def _mock_detach_volume(self, target_iqn, target_lun):
+ mount_point = '/dev/sdc'
+
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_free_slot = 1
@@ -893,11 +981,10 @@ class HyperVAPITestCase(test.TestCase):
volumeutils.VolumeUtils.logout_storage_target(mox.IsA(str))
- self._mox.ReplayAll()
- self._conn.detach_volume(connection_info, instance_data, mount_point)
- self._mox.VerifyAll()
+ def test_detach_volume(self):
+ instance_data = self._get_instance_data()
+ instance_name = instance_data['name']
- def test_boot_from_volume(self):
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
@@ -905,33 +992,17 @@ class HyperVAPITestCase(test.TestCase):
target_iqn = data['target_iqn']
target_portal = data['target_portal']
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
-
- fake_mounted_disk = "fake_mounted_disk"
- fake_device_number = 0
- fake_controller_path = 'fake_scsi_controller_path'
-
- volumeutils.VolumeUtils.login_storage_target(target_lun,
- target_iqn,
- target_portal)
+ mount_point = '/dev/sdc'
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndReturn(fake_device_number)
+ self._mock_detach_volume(target_iqn, target_lun)
- m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
- fake_device_number)
- m.AndReturn(fake_mounted_disk)
-
- m = vmutils.VMUtils.get_vm_ide_controller(mox.IsA(str), mox.IsA(int))
- m.AndReturn(fake_controller_path)
+ self._mox.ReplayAll()
+ self._conn.detach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
- m = vmutils.VMUtils.attach_volume_to_controller(mox.IsA(str),
- fake_controller_path,
- 0,
- fake_mounted_disk)
- m.WithSideEffects(self._add_volume_disk)
+ def test_boot_from_volume(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
self._setup_spawn_instance_mocks(cow=False,
block_device_info=block_device_info,
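
The test refactoring above replaces the monolithic attach/detach tests with reusable mox helpers (_mock_login_storage_target, _mock_attach_volume, _mock_detach_volume). For readers less familiar with mox, here is a minimal, self-contained sketch of the record/replay/verify cycle these helpers rely on; FakeUtils and its method are invented for illustration and are not part of the Nova tree.

```python
import mox


class FakeUtils(object):
    def get_device_number_for_target(self, target_iqn, target_lun):
        raise NotImplementedError  # replaced by the stub below


m = mox.Mox()
m.StubOutWithMock(FakeUtils, 'get_device_number_for_target')

# Record phase: calling the stubbed method registers the expected call
# and its canned return value, mirroring the m.AndReturn() calls above.
FakeUtils.get_device_number_for_target('iqn.fake', 1).AndReturn(0)

# Replay phase: the code under test must now make exactly the recorded calls.
m.ReplayAll()
assert FakeUtils().get_device_number_for_target('iqn.fake', 1) == 0

# Verify phase: fails if any recorded expectation went unmet.
m.VerifyAll()
m.UnsetStubs()
```

Stubbing at the class level is what lets the driver's own VMUtils/VolumeUtils instances hit the recorded expectations during replay.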
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index 7437bf52f..8f880652e 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -37,10 +37,10 @@ LOG = logging.getLogger(__name__)
class BaseVolumeUtils(object):
- def __init__(self):
+ def __init__(self, host='.'):
if sys.platform == 'win32':
- self._conn_wmi = wmi.WMI(moniker='//./root/wmi')
- self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+ self._conn_wmi = wmi.WMI(moniker='//%s/root/wmi' % host)
+ self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
@abc.abstractmethod
def login_storage_target(self, target_lun, target_iqn, target_portal):
@@ -96,27 +96,48 @@ class BaseVolumeUtils(object):
start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
end_device_id = disk_path.find('"', start_device_id + 1)
device_id = disk_path[start_device_id + 1:end_device_id]
- return device_id[device_id.find("\\") + 2:]
+ drive_number = device_id[device_id.find("\\") + 2:]
+ if drive_number == 'NODRIVE':
+ return None
+ return int(drive_number)
def get_session_id_from_mounted_disk(self, physical_drive_path):
drive_number = self._get_drive_number_from_disk_path(
physical_drive_path)
+ if not drive_number:
+ return None
+
initiator_sessions = self._conn_wmi.query("SELECT * FROM "
"MSiSCSIInitiator_Session"
"Class")
for initiator_session in initiator_sessions:
devices = initiator_session.Devices
for device in devices:
- device_number = str(device.DeviceNumber)
+ device_number = device.DeviceNumber
if device_number == drive_number:
return initiator_session.SessionId
def get_device_number_for_target(self, target_iqn, target_lun):
- initiator_session = self._conn_wmi.query("SELECT * FROM "
- "MSiSCSIInitiator_Session"
- "Class WHERE TargetName='%s'"
- % target_iqn)[0]
- devices = initiator_session.Devices
+ initiator_sessions = self._conn_wmi.query("SELECT * FROM "
+ "MSiSCSIInitiator_Session"
+ "Class WHERE TargetName='%s'"
+ % target_iqn)
+ if not initiator_sessions:
+ return None
+
+ devices = initiator_sessions[0].Devices
for device in devices:
if device.ScsiLun == target_lun:
return device.DeviceNumber
+
+ def get_target_from_disk_path(self, disk_path):
+ initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
+ drive_number = self._get_drive_number_from_disk_path(disk_path)
+ if not drive_number:
+ return None
+
+ for initiator_session in initiator_sessions:
+ devices = initiator_session.Devices
+ for device in devices:
+ if device.DeviceNumber == drive_number:
+ return (device.TargetName, device.ScsiLun)
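
_get_drive_number_from_disk_path now returns an int (or None for the 'NODRIVE' marker), which get_target_from_disk_path compares against device.DeviceNumber while walking the initiator sessions. The string slicing is easiest to follow on a concrete value; the path below is a shortened, invented example of a Msvm_DiskDrive object reference, not real WMI output.

```python
# Invented, shortened example of a Msvm_DiskDrive object path; real
# references are much longer but parse the same way.
disk_path = ('Msvm_DiskDrive.CreationClassName="Msvm_DiskDrive",'
             'DeviceID="Microsoft:\\\\5",SystemName="HV01"')

start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
end_device_id = disk_path.find('"', start_device_id + 1)
device_id = disk_path[start_device_id + 1:end_device_id]   # 'Microsoft:\\5'
drive_number = device_id[device_id.find("\\") + 2:]        # '5'

print(None if drive_number == 'NODRIVE' else int(drive_number))  # -> 5
```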
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 94f6f74d8..adca7b8f3 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -25,7 +25,6 @@ from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import pathutils
-from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
@@ -35,9 +34,7 @@ CONF.import_opt('use_cow_images', 'nova.virt.driver')
class LiveMigrationOps(object):
def __init__(self):
-
self._pathutils = pathutils.PathUtils()
- self._vmutils = vmutils.VMUtils()
self._livemigrutils = livemigrationutils.LiveMigrationUtils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
@@ -49,7 +46,10 @@ class LiveMigrationOps(object):
instance_name = instance_ref["name"]
try:
- self._livemigrutils.live_migrate_vm(instance_name, dest)
+ iscsi_targets = self._livemigrutils.live_migrate_vm(instance_name,
+ dest)
+ for (target_iqn, target_lun) in iscsi_targets:
+ self._volumeops.logout_storage_target(target_iqn)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug(_("Calling live migration recover_method "
@@ -66,11 +66,13 @@ class LiveMigrationOps(object):
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
- ebs_root = self._volumeops.ebs_root_in_block_devices(
+ boot_from_volume = self._volumeops.ebs_root_in_block_devices(
block_device_info)
- if not ebs_root:
+ if not boot_from_volume:
self._imagecache.get_cached_image(context, instance)
+ self._volumeops.login_storage_targets(block_device_info)
+
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug(_("post_live_migration_at_destination called"),
diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py
index d039a5016..2563e1182 100644
--- a/nova/virt/hyperv/livemigrationutils.py
+++ b/nova/virt/hyperv/livemigrationutils.py
@@ -20,8 +20,10 @@ import sys
if sys.platform == 'win32':
import wmi
+from nova import exception
from nova.openstack.common import log as logging
from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
@@ -30,6 +32,7 @@ class LiveMigrationUtils(object):
def __init__(self):
self._vmutils = vmutils.VMUtils()
+ self._volutils = volumeutilsv2.VolumeUtilsV2()
def _get_conn_v2(self, host='localhost'):
try:
@@ -64,19 +67,107 @@ class LiveMigrationUtils(object):
vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if not n:
- raise vmutils.HyperVException(_('VM not found: %s') % vm_name)
+ raise exception.NotFound(_('VM not found: %s') % vm_name)
elif n > 1:
raise vmutils.HyperVException(_('Duplicate VM name found: %s')
% vm_name)
return vms[0]
- def live_migrate_vm(self, vm_name, dest_host):
- self.check_live_migration_config()
+ def _destroy_planned_vm(self, conn_v2_remote, planned_vm):
+ LOG.debug(_("Destroying existing remote planned VM: %s"),
+ planned_vm.ElementName)
+ vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
+ (job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
+ self._vmutils.check_ret_val(ret_val, job_path)
- # We need a v2 namespace VM object
- conn_v2_local = self._get_conn_v2()
+ def _check_existing_planned_vm(self, conn_v2_remote, vm):
+ # Make sure that there's not yet a remote planned VM on the target
+ # host for this VM
+ planned_vms = conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)
+ if planned_vms:
+ self._destroy_planned_vm(conn_v2_remote, planned_vms[0])
- vm = self._get_vm(conn_v2_local, vm_name)
+ def _create_remote_planned_vm(self, conn_v2_local, conn_v2_remote,
+ vm, rmt_ip_addr_list, dest_host):
+ # Staged
+ vsmsd = conn_v2_local.query("select * from "
+ "Msvm_VirtualSystemMigrationSettingData "
+ "where MigrationType = 32770")[0]
+ vsmsd.DestinationIPAddressList = rmt_ip_addr_list
+ migration_setting_data = vsmsd.GetText_(1)
+
+ LOG.debug(_("Creating remote planned VM for VM: %s"),
+ vm.ElementName)
+ migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
+ (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
+ ComputerSystem=vm.path_(),
+ DestinationHost=dest_host,
+ MigrationSettingData=migration_setting_data)
+ self._vmutils.check_ret_val(ret_val, job_path)
+
+ return conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)[0]
+
+ def _get_physical_disk_paths(self, vm_name):
+ ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
+ ide_paths = self._vmutils.get_controller_volume_paths(ide_ctrl_path)
+
+ scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name)
+ scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path)
+
+ return dict(ide_paths.items() + scsi_paths.items())
+
+ def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host):
+ volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host)
+
+ disk_paths_remote = {}
+ iscsi_targets = []
+ for (rasd_rel_path, disk_path) in disk_paths.items():
+ (target_iqn,
+ target_lun) = self._volutils.get_target_from_disk_path(disk_path)
+ iscsi_targets.append((target_iqn, target_lun))
+
+ dev_num = volutils_remote.get_device_number_for_target(target_iqn,
+ target_lun)
+ disk_path_remote = vmutils_remote.get_mounted_disk_by_drive_number(
+ dev_num)
+
+ disk_paths_remote[rasd_rel_path] = disk_path_remote
+
+ return (disk_paths_remote, iscsi_targets)
+
+ def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote,
+ planned_vm, vm_name,
+ disk_paths_remote):
+ vm_settings = planned_vm.associators(
+ wmi_association_class='Msvm_SettingsDefineState',
+ wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+
+ updated_resource_setting_data = []
+ sasds = vm_settings.associators(
+ wmi_association_class='Msvm_VirtualSystemSettingDataComponent')
+ for sasd in sasds:
+ if (sasd.ResourceType == 17 and sasd.ResourceSubType ==
+ "Microsoft:Hyper-V:Physical Disk Drive" and
+ sasd.HostResource):
+ # Replace the local disk target with the correct remote one
+ old_disk_path = sasd.HostResource[0]
+ new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)
+
+ LOG.debug(_("Replacing host resource "
+ "%(old_disk_path)s with "
+ "%(new_disk_path)s on planned VM %(vm_name)s") %
+ locals())
+ sasd.HostResource = [new_disk_path]
+ updated_resource_setting_data.append(sasd.GetText_(1))
+
+ LOG.debug(_("Updating remote planned VM disk paths for VM: %s"),
+ vm_name)
+ vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
+ (res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings(
+ ResourceSettings=updated_resource_setting_data)
+ vmutils_remote.check_ret_val(ret_val, job_path)
+
+ def _get_vhd_setting_data(self, vm):
vm_settings = vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')[0]
@@ -90,26 +181,69 @@ class LiveMigrationUtils(object):
"Microsoft:Hyper-V:Virtual Hard Disk"):
#sasd.PoolId = ""
new_resource_setting_data.append(sasd.GetText_(1))
+ return new_resource_setting_data
- LOG.debug(_("Getting live migration networks for remote host: %s"),
- dest_host)
- conn_v2_remote = self._get_conn_v2(dest_host)
- migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
- rmt_ip_addr_list = migr_svc_rmt.MigrationServiceListenerIPAddressList
-
+ def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
+ new_resource_setting_data, dest_host):
# VirtualSystemAndStorage
vsmsd = conn_v2_local.query("select * from "
"Msvm_VirtualSystemMigrationSettingData "
"where MigrationType = 32771")[0]
vsmsd.DestinationIPAddressList = rmt_ip_addr_list
+ if planned_vm:
+ vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name
migration_setting_data = vsmsd.GetText_(1)
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
- LOG.debug(_("Starting live migration for VM: %s"), vm_name)
+ LOG.debug(_("Starting live migration for VM: %s"), vm.ElementName)
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
ComputerSystem=vm.path_(),
DestinationHost=dest_host,
MigrationSettingData=migration_setting_data,
NewResourceSettingData=new_resource_setting_data)
self._vmutils.check_ret_val(ret_val, job_path)
+
+ def _get_remote_ip_address_list(self, conn_v2_remote, dest_host):
+ LOG.debug(_("Getting live migration networks for remote host: %s"),
+ dest_host)
+ migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
+ return migr_svc_rmt.MigrationServiceListenerIPAddressList
+
+ def live_migrate_vm(self, vm_name, dest_host):
+ self.check_live_migration_config()
+
+ conn_v2_local = self._get_conn_v2()
+ conn_v2_remote = self._get_conn_v2(dest_host)
+
+ vm = self._get_vm(conn_v2_local, vm_name)
+ self._check_existing_planned_vm(conn_v2_remote, vm)
+
+ rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote,
+ dest_host)
+
+ iscsi_targets = []
+ planned_vm = None
+ disk_paths = self._get_physical_disk_paths(vm_name)
+ if disk_paths:
+ vmutils_remote = vmutils.VMUtils(dest_host)
+ (disk_paths_remote,
+ iscsi_targets) = self._get_remote_disk_data(vmutils_remote,
+ disk_paths,
+ dest_host)
+
+ planned_vm = self._create_remote_planned_vm(conn_v2_local,
+ conn_v2_remote,
+ vm, rmt_ip_addr_list,
+ dest_host)
+
+ self._update_planned_vm_disk_resources(vmutils_remote,
+ conn_v2_remote, planned_vm,
+ vm_name, disk_paths_remote)
+
+ new_resource_setting_data = self._get_vhd_setting_data(vm)
+ self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
+ new_resource_setting_data, dest_host)
+
+ # In case the caller wants to log off the targets after migration
+ return iscsi_targets
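
live_migrate_vm now distinguishes two Msvm_VirtualSystemMigrationSettingData types: 32770 (staged) to pre-create a planned VM on the destination, and 32771 (virtual system and storage) for the migration itself. The planned VM is only created when the source VM has physical (iSCSI) disks attached, since their host resources must first be remapped to the destination's device paths. A trivial sketch of the two WQL queries, using the constants from the diff; the helper function itself is ours:

```python
# MigrationType values used in the hunks above (per the diff).
MIGRATION_TYPE_STAGED = 32770            # create the remote planned VM
MIGRATION_TYPE_VM_AND_STORAGE = 32771    # migrate the VM and its storage


def migration_settings_query(migration_type):
    return ("select * from Msvm_VirtualSystemMigrationSettingData "
            "where MigrationType = %d" % migration_type)


print(migration_settings_query(MIGRATION_TYPE_STAGED))
print(migration_settings_query(MIGRATION_TYPE_VM_AND_STORAGE))
```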
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 6536efe1e..45fea329d 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -44,10 +44,10 @@ class HyperVException(exception.NovaException):
class VMUtils(object):
- def __init__(self):
+ def __init__(self, host='.'):
if sys.platform == 'win32':
- self._conn = wmi.WMI(moniker='//./root/virtualization')
- self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+ self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
+ self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
@@ -92,7 +92,7 @@ class VMUtils(object):
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
- raise HyperVException(_('VM not found: %s') % vm_name)
+ raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
@@ -164,7 +164,7 @@ class VMUtils(object):
LOG.debug(_('Set vCPUs for vm %s'), vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
- def get_vm_iscsi_controller(self, vm_name):
+ def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
@@ -263,7 +263,7 @@ class VMUtils(object):
scsicontrl = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData',
scsicontrldflt)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
- scsiresource = self._add_virt_resource(scsicontrl, vm.path_())
+ self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
@@ -316,7 +316,6 @@ class VMUtils(object):
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
-
vm = self._lookup_vm_check(vm_name)
(job_path, ret_val) = vm.RequestStateChange(req_state)
#Invalid state for current operation (32775) typically means that
@@ -470,7 +469,8 @@ class VMUtils(object):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(
disk_path)
- self._remove_virt_resource(physical_disk, vm.path_())
+ if physical_disk:
+ self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM "
@@ -488,3 +488,15 @@ class VMUtils(object):
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
+
+ def get_controller_volume_paths(self, controller_path):
+ disks = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData "
+ "WHERE ResourceSubType="
+ "'Microsoft Physical Disk Drive' AND "
+ "Parent='%s'" % controller_path)
+ disk_data = {}
+ for disk in disks:
+ if disk.HostResource:
+ disk_data[disk.path().RelPath] = disk.HostResource[0]
+ return disk_data
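
get_controller_volume_paths returns a {RASD relative path: host resource} dict that _update_planned_vm_disk_resources later uses to swap local disk paths for remote ones. The fakes below mimic only the two attributes the method reads from each WMI result row; both classes are invented for illustration.

```python
class FakePath(object):
    # Mimics the object returned by a WMI instance's path() method.
    def __init__(self, rel_path):
        self.RelPath = rel_path


class FakeDisk(object):
    # Mimics a Msvm_ResourceAllocationSettingData result row.
    def __init__(self, rel_path, host_resource):
        self._rel_path = rel_path
        self.HostResource = host_resource

    def path(self):
        return FakePath(self._rel_path)


disks = [FakeDisk('Msvm_RASD.InstanceID="A"', [r'\\.\PHYSICALDRIVE2']),
         FakeDisk('Msvm_RASD.InstanceID="B"', None)]  # no HostResource

disk_data = {}
for disk in disks:
    if disk.HostResource:  # rows without a host resource are skipped
        disk_data[disk.path().RelPath] = disk.HostResource[0]

print(disk_data)
```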
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 3542a6194..74953435a 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -23,6 +23,7 @@ import time
from oslo.config import cfg
+from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostutils
@@ -87,6 +88,30 @@ class VolumeOps(object):
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
+ def login_storage_targets(self, block_device_info):
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ for vol in mapping:
+ self._login_storage_target(vol['connection_info'])
+
+ def _login_storage_target(self, connection_info):
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ # Check if we already logged in
+ if self._volutils.get_device_number_for_target(target_iqn, target_lun):
+ LOG.debug(_("Already logged in on storage target. No need to "
+ "login. Portal: %(target_portal)s, "
+ "IQN: %(target_iqn)s, LUN: %(target_lun)s") % locals())
+ else:
+ LOG.debug(_("Logging in on storage target. Portal: "
+ "%(target_portal)s, IQN: %(target_iqn)s, "
+ "LUN: %(target_lun)s") % locals())
+ self._volutils.login_storage_target(target_lun, target_iqn,
+ target_portal)
+ # Wait for the target to be mounted
+ self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
+
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""
Attach a volume to the SCSI controller or to the IDE controller if
@@ -94,13 +119,13 @@ class VolumeOps(object):
"""
LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s")
% locals())
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
- self._volutils.login_storage_target(target_lun, target_iqn,
- target_portal)
try:
+ self._login_storage_target(connection_info)
+
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
@@ -113,7 +138,7 @@ class VolumeOps(object):
slot = 0
else:
#Find the SCSI controller for the vm
- ctrller_path = self._vmutils.get_vm_iscsi_controller(
+ ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
@@ -136,13 +161,19 @@ class VolumeOps(object):
for vol in mapping:
self.detach_volume(vol['connection_info'], instance_name)
+ def logout_storage_target(self, target_iqn):
+ LOG.debug(_("Logging off storage target %(target_iqn)s") % locals())
+ self._volutils.logout_storage_target(target_iqn)
+
def detach_volume(self, connection_info, instance_name):
"""Dettach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s "
"from %(instance_name)s") % locals())
+
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
+
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
@@ -151,8 +182,7 @@ class VolumeOps(object):
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
- #Sending logout
- self._volutils.logout_storage_target(target_iqn)
+ self.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
@@ -165,27 +195,26 @@ class VolumeOps(object):
'initiator': self._initiator,
}
- def _get_mounted_disk_from_lun(self, target_iqn, target_lun):
+ def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
+ wait_for_device=False):
device_number = self._volutils.get_device_number_for_target(target_iqn,
target_lun)
if device_number is None:
- raise vmutils.HyperVException(_('Unable to find a mounted '
- 'disk for target_iqn: %s')
- % target_iqn)
+ raise exception.NotFound(_('Unable to find a mounted disk for '
+ 'target_iqn: %s') % target_iqn)
LOG.debug(_('Device number: %(device_number)s, '
'target lun: %(target_lun)s') % locals())
#Finding Mounted disk drive
- for i in range(1, CONF.hyperv.volume_attach_retry_count):
+ for i in range(0, CONF.hyperv.volume_attach_retry_count):
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
device_number)
- if mounted_disk_path:
+ if mounted_disk_path or not wait_for_device:
break
time.sleep(CONF.hyperv.volume_attach_retry_interval)
if not mounted_disk_path:
- raise vmutils.HyperVException(_('Unable to find a mounted disk '
- 'for target_iqn: %s')
- % target_iqn)
+ raise exception.NotFound(_('Unable to find a mounted disk '
+ 'for target_iqn: %s') % target_iqn)
return mounted_disk_path
def disconnect_volume(self, physical_drive_path):
@@ -194,3 +223,6 @@ class VolumeOps(object):
physical_drive_path)
#Logging out the target
self._volutils.execute_log_out(session_id)
+
+ def get_target_from_disk_path(self, physical_drive_path):
+ return self._volutils.get_target_from_disk_path(physical_drive_path)
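
_get_mounted_disk_from_lun gains a wait_for_device flag: attach paths poll until Windows exposes the newly logged-in disk, while plain lookups fail fast after a single attempt. A runnable sketch of the loop, with the WMI lookup replaced by a fake that succeeds on the third poll and the CONF options replaced by constants:

```python
import time

RETRY_COUNT = 10       # stands in for CONF.hyperv.volume_attach_retry_count
RETRY_INTERVAL = 0.01  # stands in for CONF.hyperv.volume_attach_retry_interval

attempts = {'count': 0}


def fake_get_mounted_disk_by_drive_number(device_number):
    # Simulates the disk appearing only on the third poll.
    attempts['count'] += 1
    return r'\\.\PHYSICALDRIVE3' if attempts['count'] >= 3 else None


def get_mounted_disk(device_number, wait_for_device=False):
    mounted_disk_path = None
    for _ in range(RETRY_COUNT):
        mounted_disk_path = fake_get_mounted_disk_by_drive_number(
            device_number)
        if mounted_disk_path or not wait_for_device:
            break
        time.sleep(RETRY_INTERVAL)
    if not mounted_disk_path:
        raise LookupError('Unable to find a mounted disk')
    return mounted_disk_path


print(get_mounted_disk(0, wait_for_device=True))  # polls three times
```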
diff --git a/nova/virt/hyperv/volumeutilsv2.py b/nova/virt/hyperv/volumeutilsv2.py
index cdc0803ee..5fafe4c32 100644
--- a/nova/virt/hyperv/volumeutilsv2.py
+++ b/nova/virt/hyperv/volumeutilsv2.py
@@ -37,10 +37,10 @@ CONF = cfg.CONF
class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
- def __init__(self):
- super(VolumeUtilsV2, self).__init__()
+ def __init__(self, host='.'):
+ super(VolumeUtilsV2, self).__init__(host)
- storage_namespace = '//./root/microsoft/windows/storage'
+ storage_namespace = '//%s/root/microsoft/windows/storage' % host
if sys.platform == 'win32':
self._conn_storage = wmi.WMI(moniker=storage_namespace)
@@ -64,16 +64,21 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
def logout_storage_target(self, target_iqn):
"""Logs out storage target through its session id."""
+ targets = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)
+ if targets:
+ target = targets[0]
+ if target.IsConnected:
+ sessions = self._conn_storage.MSFT_iSCSISession(
+ TargetNodeAddress=target_iqn)
- target = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)[0]
- if target.IsConnected:
- session = self._conn_storage.MSFT_iSCSISession(
- TargetNodeAddress=target_iqn)[0]
- if session.IsPersistent:
- session.Unregister()
- target.Disconnect()
+ for session in sessions:
+ if session.IsPersistent:
+ session.Unregister()
+
+ target.Disconnect()
def execute_log_out(self, session_id):
- session = self._conn_wmi.MSiSCSIInitiator_SessionClass(
- SessionId=session_id)[0]
- self.logout_storage_target(session.TargetName)
+ sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass(
+ SessionId=session_id)
+ if sessions:
+ self.logout_storage_target(sessions[0].TargetName)
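
logout_storage_target and execute_log_out above now index into WMI result lists only after checking that they are non-empty, and unregister every persistent session rather than just the first. The flow is easy to exercise with fakes; both classes below are invented stand-ins for MSFT_iSCSITarget and MSFT_iSCSISession results.

```python
class FakeSession(object):
    def __init__(self, persistent):
        self.IsPersistent = persistent

    def Unregister(self):
        print('unregistered persistent session')


class FakeTarget(object):
    IsConnected = True

    def Disconnect(self):
        print('disconnected target')


def logout_storage_target(targets, sessions):
    if not targets:  # the old code indexed [0] and raised on missing targets
        return
    target = targets[0]
    if target.IsConnected:
        for session in sessions:
            if session.IsPersistent:
                session.Unregister()
        target.Disconnect()


logout_storage_target([FakeTarget()], [FakeSession(True), FakeSession(False)])
logout_storage_target([], [])  # now a no-op instead of an IndexError
```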