From cdfa0d0b3395ce3db6aab21a8f4d39059565e641 Mon Sep 17 00:00:00 2001 From: Alessandro Pilotti Date: Tue, 29 Jan 2013 00:53:24 +0200 Subject: Nova Hyper-V driver refactoring Blueprint: hyper-v-testing-serialization-improvements This patchset contains a major refactoring of the Hyper-V driver. The main reason for this refactoring is to provide proper component abstraction and decoupling, thus replacing almost entirely the remaining pre-Essex code previously available. This leads to a considerable simplification of the testing framework, which is now entirely based on stubs and mocks (mox), without any serialized stub / mock. From an architectural perspective, the main driver class delegates operations to "ops" classes (e.g. VMOps, VolumeOps, etc) which contain the main logic and delegate OS specific actions to "utils" classes (e.g. VMUtils, LiveMigrationUtils, etc) where the WMI and Win32 API OS specific code resides. Additional attention has also been put into better PEP8 code formatting, including compliance with non-mandatory checks like E121 through E128. 
Change-Id: I900719c02b7c6b48d44ca68903813a1dcd023f9f --- nova/virt/hyperv/__init__.py | 16 ++ nova/virt/hyperv/baseops.py | 69 ----- nova/virt/hyperv/basevolumeutils.py | 67 ++++- nova/virt/hyperv/constants.py | 9 - nova/virt/hyperv/driver.py | 96 ++----- nova/virt/hyperv/hostops.py | 164 +++++------ nova/virt/hyperv/hostutils.py | 74 +++++ nova/virt/hyperv/ioutils.py | 26 -- nova/virt/hyperv/livemigrationops.py | 114 ++------ nova/virt/hyperv/livemigrationutils.py | 115 ++++++++ nova/virt/hyperv/networkutils.py | 62 ++++ nova/virt/hyperv/pathutils.py | 67 +++++ nova/virt/hyperv/snapshotops.py | 160 +++-------- nova/virt/hyperv/vhdutils.py | 72 +++++ nova/virt/hyperv/vif.py | 71 +---- nova/virt/hyperv/vmops.py | 511 +++++++++------------------------ nova/virt/hyperv/vmutils.py | 510 ++++++++++++++++++++++++++------ nova/virt/hyperv/volumeops.py | 277 +++++------------- nova/virt/hyperv/volumeutils.py | 80 +++--- nova/virt/hyperv/volumeutilsV2.py | 70 ----- nova/virt/hyperv/volumeutilsv2.py | 75 +++++ 21 files changed, 1373 insertions(+), 1332 deletions(-) delete mode 100644 nova/virt/hyperv/baseops.py create mode 100644 nova/virt/hyperv/hostutils.py delete mode 100644 nova/virt/hyperv/ioutils.py create mode 100644 nova/virt/hyperv/livemigrationutils.py create mode 100644 nova/virt/hyperv/networkutils.py create mode 100644 nova/virt/hyperv/pathutils.py create mode 100644 nova/virt/hyperv/vhdutils.py delete mode 100644 nova/virt/hyperv/volumeutilsV2.py create mode 100644 nova/virt/hyperv/volumeutilsv2.py (limited to 'nova/virt') diff --git a/nova/virt/hyperv/__init__.py b/nova/virt/hyperv/__init__.py index e69de29bb..090fc0639 100644 --- a/nova/virt/hyperv/__init__.py +++ b/nova/virt/hyperv/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions Srl +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/virt/hyperv/baseops.py b/nova/virt/hyperv/baseops.py deleted file mode 100644 index 5b617f898..000000000 --- a/nova/virt/hyperv/baseops.py +++ /dev/null @@ -1,69 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management base class for Hyper-V operations. 
-""" -import sys - -from nova.openstack.common import log as logging - -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import wmi - -LOG = logging.getLogger(__name__) - - -class BaseOps(object): - def __init__(self): - self.__conn = None - self.__conn_v2 = None - self.__conn_cimv2 = None - self.__conn_wmi = None - self.__conn_storage = None - - @property - def _conn(self): - if self.__conn is None: - self.__conn = wmi.WMI(moniker='//./root/virtualization') - return self.__conn - - @property - def _conn_v2(self): - if self.__conn_v2 is None: - self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2') - return self.__conn_v2 - - @property - def _conn_cimv2(self): - if self.__conn_cimv2 is None: - self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2') - return self.__conn_cimv2 - - @property - def _conn_wmi(self): - if self.__conn_wmi is None: - self.__conn_wmi = wmi.WMI(moniker='//./root/wmi') - return self.__conn_wmi - - @property - def _conn_storage(self): - if self.__conn_storage is None: - storage_namespace = '//./Root/Microsoft/Windows/Storage' - self.__conn_storage = wmi.WMI(moniker=storage_namespace) - return self.__conn_storage diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py index 2352c3bef..34b15ea53 100644 --- a/nova/virt/hyperv/basevolumeutils.py +++ b/nova/virt/hyperv/basevolumeutils.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 Pedro Navarro Perez +# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -20,17 +21,18 @@ Helper methods for operations related to the management of volumes, and storage repositories """ +import abc import sys +if sys.platform == 'win32': + import _winreg + import wmi + from nova import block_device from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova.virt import driver -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import _winreg - LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('my_ip', 'nova.netconf') @@ -38,25 +40,40 @@ CONF.import_opt('my_ip', 'nova.netconf') class BaseVolumeUtils(object): + def __init__(self): + if sys.platform == 'win32': + self._conn_wmi = wmi.WMI(moniker='//./root/wmi') + + @abc.abstractmethod + def login_storage_target(self, target_lun, target_iqn, target_portal): + pass + + @abc.abstractmethod + def logout_storage_target(self, target_iqn): + pass + + @abc.abstractmethod + def execute_log_out(self, session_id): + pass + def get_iscsi_initiator(self, cim_conn): """Get iscsi initiator name for this machine.""" computer_system = cim_conn.Win32_ComputerSystem()[0] hostname = computer_system.name - keypath = \ - r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery" + keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\" + "iSCSI\\Discovery") try: key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0, - _winreg.KEY_ALL_ACCESS) + _winreg.KEY_ALL_ACCESS) temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName') initiator_name = str(temp[0]) _winreg.CloseKey(key) except Exception: LOG.info(_("The ISCSI initiator name can't be found. 
" - "Choosing the default one")) + "Choosing the default one")) computer_system = cim_conn.Win32_ComputerSystem()[0] - initiator_name = "iqn.1991-05.com.microsoft:" + \ - hostname.lower() + initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower() return { 'ip': CONF.my_ip, 'initiator': initiator_name, @@ -78,3 +95,33 @@ class BaseVolumeUtils(object): LOG.debug(_("block_device_list %s"), block_device_list) return block_device.strip_dev(mount_device) in block_device_list + + def _get_drive_number_from_disk_path(self, disk_path): + # TODO(pnavarro) replace with regex + start_device_id = disk_path.find('"', disk_path.find('DeviceID')) + end_device_id = disk_path.find('"', start_device_id + 1) + device_id = disk_path[start_device_id + 1:end_device_id] + return device_id[device_id.find("\\") + 2:] + + def get_session_id_from_mounted_disk(self, physical_drive_path): + drive_number = self._get_drive_number_from_disk_path( + physical_drive_path) + initiator_sessions = self._conn_wmi.query("SELECT * FROM " + "MSiSCSIInitiator_Session" + "Class") + for initiator_session in initiator_sessions: + devices = initiator_session.Devices + for device in devices: + device_number = str(device.DeviceNumber) + if device_number == drive_number: + return initiator_session.SessionId + + def get_device_number_for_target(self, target_iqn, target_lun): + initiator_session = self._conn_wmi.query("SELECT * FROM " + "MSiSCSIInitiator_Session" + "Class WHERE TargetName='%s'" + % target_iqn)[0] + devices = initiator_session.Devices + for device in devices: + if device.ScsiLun == target_lun: + return device.DeviceNumber diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py index 4be569e88..31323f0f4 100644 --- a/nova/virt/hyperv/constants.py +++ b/nova/virt/hyperv/constants.py @@ -35,15 +35,6 @@ HYPERV_POWER_STATE = { HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED } -REQ_POWER_STATE = { - 'Enabled': HYPERV_VM_STATE_ENABLED, - 'Disabled': HYPERV_VM_STATE_DISABLED, - 
'Reboot': HYPERV_VM_STATE_REBOOT, - 'Reset': HYPERV_VM_STATE_RESET, - 'Paused': HYPERV_VM_STATE_PAUSED, - 'Suspended': HYPERV_VM_STATE_SUSPENDED, -} - WMI_WIN32_PROCESSOR_ARCHITECTURE = { 0: 'x86', 1: 'MIPS', diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index 9316b2598..e8bf8c416 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -16,49 +16,7 @@ # under the License. """ -A connection to Hyper-V . -Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V -Hyper-V WMI usage: - http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx -The Hyper-V object model briefly: - The physical computer and its hosted virtual machines are each represented - by the Msvm_ComputerSystem class. - - Each virtual machine is associated with a - Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more - Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting - there is a series of Msvm_ResourceAllocationSettingData (rasd) objects. - The rasd objects describe the settings for each device in a VM. - Together, the vs_gs_data, vmsettings and rasds describe the configuration - of the virtual machine. - - Creating new resources such as disks and nics involves cloning a default - rasd object and appropriately modifying the clone and calling the - AddVirtualSystemResources WMI method - Changing resources such as memory uses the ModifyVirtualSystemResources - WMI method - -Using the Python WMI library: - Tutorial: - http://timgolden.me.uk/python/wmi/tutorial.html - Hyper-V WMI objects can be retrieved simply by using the class name - of the WMI object and optionally specifying a column to filter the - result set. More complex filters can be formed using WQL (sql-like) - queries. - The parameters and return tuples of WMI method calls can gleaned by - examining the doc string. 
For example: - >>> vs_man_svc.ModifyVirtualSystemResources.__doc__ - ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[]) - => (Job, ReturnValue)' - When passing setting data (ResourceSettingData) to the WMI method, - an XML representation of the data is passed in using GetText_(1). - Available methods on a service can be determined using method.keys(): - >>> vs_man_svc.methods.keys() - vmsettings and rasds for a vm can be retrieved using the 'associators' - method with the appropriate return class. - Long running WMI commands generally return a Job (an instance of - Msvm_ConcreteJob) whose state can be polled to determine when it finishes - +A Hyper-V Nova Compute driver. """ from nova.openstack.common import log as logging @@ -84,7 +42,7 @@ class HyperVDriver(driver.ComputeDriver): self._volumeops) def init_host(self, host): - self._host = host + pass def list_instances(self): return self._vmops.list_instances() @@ -92,7 +50,7 @@ class HyperVDriver(driver.ComputeDriver): def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): self._vmops.spawn(context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info) + admin_password, network_info, block_device_info) def reboot(self, instance, network_info, reboot_type, block_device_info=None): @@ -106,16 +64,12 @@ class HyperVDriver(driver.ComputeDriver): return self._vmops.get_info(instance) def attach_volume(self, connection_info, instance, mountpoint): - """Attach volume storage to VM instance.""" return self._volumeops.attach_volume(connection_info, - instance['name'], - mountpoint) + instance['name']) def detach_volume(self, connection_info, instance, mountpoint): - """Detach volume storage to VM instance.""" return self._volumeops.detach_volume(connection_info, - instance['name'], - mountpoint) + instance['name']) def get_volume_connector(self, instance): return 
self._volumeops.get_volume_connector(instance) @@ -151,30 +105,38 @@ class HyperVDriver(driver.ComputeDriver): self._vmops.power_on(instance) def live_migration(self, context, instance_ref, dest, post_method, - recover_method, block_migration=False, migrate_data=None): + recover_method, block_migration=False, + migrate_data=None): self._livemigrationops.live_migration(context, instance_ref, dest, - post_method, recover_method, block_migration, migrate_data) + post_method, recover_method, + block_migration, migrate_data) def compare_cpu(self, cpu_info): return self._livemigrationops.compare_cpu(cpu_info) def pre_live_migration(self, context, instance, block_device_info, - network_info, migrate_data=None): + network_info, migrate_data=None): self._livemigrationops.pre_live_migration(context, instance, - block_device_info, network_info) + block_device_info, + network_info) def post_live_migration_at_destination(self, ctxt, instance_ref, - network_info, block_migration, block_device_info=None): + network_info, + block_migr=False, + block_device_info=None): self._livemigrationops.post_live_migration_at_destination(ctxt, - instance_ref, network_info, block_migration) - - def check_can_live_migrate_destination(self, ctxt, instance, - src_compute_info, dst_compute_info, - block_migration, disk_over_commit): + instance_ref, + network_info, + block_migr) + + def check_can_live_migrate_destination(self, ctxt, instance_ref, + src_compute_info, dst_compute_info, + block_migration=False, + disk_over_commit=False): pass def check_can_live_migrate_destination_cleanup(self, ctxt, - dest_check_data): + dest_check_data): pass def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): @@ -188,25 +150,21 @@ class HyperVDriver(driver.ComputeDriver): def ensure_filtering_rules_for_instance(self, instance_ref, network_info): LOG.debug(_("ensure_filtering_rules_for_instance called"), - instance=instance_ref) + instance=instance_ref) def unfilter_instance(self, instance, 
network_info): - """Stop filtering instance.""" LOG.debug(_("unfilter_instance called"), instance=instance) def confirm_migration(self, migration, instance, network_info): - """Confirms a resize, destroying the source VM.""" LOG.debug(_("confirm_migration called"), instance=instance) def finish_revert_migration(self, instance, network_info, block_device_info=None): - """Finish reverting a resize, powering back on the instance.""" LOG.debug(_("finish_revert_migration called"), instance=instance) def finish_migration(self, context, migration, instance, disk_info, - network_info, image_meta, resize_instance=False, - block_device_info=None): - """Completes a resize, turning on the migrated instance.""" + network_info, image_meta, resize_instance=False, + block_device_info=None): LOG.debug(_("finish_migration called"), instance=instance) def get_console_output(self, instance): diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py index 5cbe46c1c..5a22b60de 100644 --- a/nova/virt/hyperv/hostops.py +++ b/nova/virt/hyperv/hostops.py @@ -18,25 +18,23 @@ """ Management class for host operations. """ -import ctypes -import multiprocessing import os import platform -from nova.openstack.common import cfg from nova.openstack.common import jsonutils from nova.openstack.common import log as logging -from nova.virt.hyperv import baseops from nova.virt.hyperv import constants +from nova.virt.hyperv import hostutils +from nova.virt.hyperv import pathutils -CONF = cfg.CONF LOG = logging.getLogger(__name__) -class HostOps(baseops.BaseOps): +class HostOps(object): def __init__(self): - super(HostOps, self).__init__() self._stats = None + self._hostutils = hostutils.HostUtils() + self._pathutils = pathutils.PathUtils() def _get_cpu_info(self): """Get the CPU information. @@ -44,94 +42,51 @@ class HostOps(baseops.BaseOps): of the central processor in the hypervisor. 
""" cpu_info = dict() - processor = self._conn_cimv2.query( - "SELECT * FROM Win32_Processor WHERE ProcessorType = 3") - cpu_info['arch'] = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE\ - .get(processor[0].Architecture, 'Unknown') - cpu_info['model'] = processor[0].Name - cpu_info['vendor'] = processor[0].Manufacturer + processors = self._hostutils.get_cpus_info() + + w32_arch_dict = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE + cpu_info['arch'] = w32_arch_dict.get(processors[0]['Architecture'], + 'Unknown') + cpu_info['model'] = processors[0]['Name'] + cpu_info['vendor'] = processors[0]['Manufacturer'] topology = dict() - topology['sockets'] = len(processor) - topology['cores'] = processor[0].NumberOfCores - topology['threads'] = processor[0].NumberOfLogicalProcessors\ - / processor[0].NumberOfCores + topology['sockets'] = len(processors) + topology['cores'] = processors[0]['NumberOfCores'] + topology['threads'] = (processors[0]['NumberOfLogicalProcessors'] / + processors[0]['NumberOfCores']) cpu_info['topology'] = topology features = list() for fkey, fname in constants.PROCESSOR_FEATURE.items(): - if ctypes.windll.kernel32.IsProcessorFeaturePresent(fkey): + if self._hostutils.is_cpu_feature_present(fkey): features.append(fname) cpu_info['features'] = features - return jsonutils.dumps(cpu_info) + return cpu_info - def _get_vcpu_total(self): - """Get vcpu number of physical computer. - :returns: the number of cpu core. - """ - # On certain platforms, this will raise a NotImplementedError. - try: - return multiprocessing.cpu_count() - except NotImplementedError: - LOG.warn(_("Cannot get the number of cpu, because this " - "function is not implemented for this platform. " - "This error can be safely ignored for now.")) - return 0 - - def _get_memory_mb_total(self): - """Get the total memory size(MB) of physical computer. - :returns: the total amount of memory(MB). 
- """ - total_kb = self._conn_cimv2.query( - "SELECT TotalVisibleMemorySize FROM win32_operatingsystem")[0]\ - .TotalVisibleMemorySize - total_mb = long(total_kb) / 1024 - return total_mb + def _get_memory_info(self): + (total_mem_kb, free_mem_kb) = self._hostutils.get_memory_info() + total_mem_mb = total_mem_kb / 1024 + free_mem_mb = free_mem_kb / 1024 + return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb) def _get_local_hdd_info_gb(self): - """Get the total and used size of the volume containing - CONF.instances_path expressed in GB. - :returns: - A tuple with the total and used space in GB. - """ - normalized_path = os.path.normpath(CONF.instances_path) - drive, path = os.path.splitdrive(normalized_path) - hdd_info = self._conn_cimv2.query( - ("SELECT FreeSpace,Size FROM win32_logicaldisk WHERE DeviceID='%s'" - ) % drive)[0] - total_gb = long(hdd_info.Size) / (1024 ** 3) - free_gb = long(hdd_info.FreeSpace) / (1024 ** 3) - used_gb = total_gb - free_gb - return total_gb, used_gb + (drive, _) = os.path.splitdrive(self._pathutils.get_instances_path()) + (size, free_space) = self._hostutils.get_volume_info(drive) - def _get_vcpu_used(self): - """Get vcpu usage number of physical computer. - :returns: The total number of vcpu that currently used. - """ - #TODO(jordanrinke) figure out a way to count assigned VCPUs - total_vcpu = 0 - return total_vcpu - - def _get_memory_mb_used(self): - """Get the free memory size(MB) of physical computer. - :returns: the total usage of memory(MB). - """ - total_kb = self._conn_cimv2.query( - "SELECT FreePhysicalMemory FROM win32_operatingsystem")[0]\ - .FreePhysicalMemory - total_mb = long(total_kb) / 1024 - - return total_mb + total_gb = size / (1024 ** 3) + free_gb = free_space / (1024 ** 3) + used_gb = total_gb - free_gb + return (total_gb, free_gb, used_gb) def _get_hypervisor_version(self): """Get hypervisor version. :returns: hypervisor version (ex. 
12003) """ - version = self._conn_cimv2.Win32_OperatingSystem()[0]\ - .Version.replace('.', '') - LOG.info(_('Windows version: %s ') % version) + version = self._hostutils.get_windows_version().replace('.', '') + LOG.debug(_('Windows version: %s ') % version) return version def get_available_resource(self): @@ -143,36 +98,53 @@ class HostOps(baseops.BaseOps): :returns: dictionary describing resources """ - LOG.info(_('get_available_resource called')) - - local_gb, used_gb = self._get_local_hdd_info_gb() - dic = {'vcpus': self._get_vcpu_total(), - 'memory_mb': self._get_memory_mb_total(), - 'local_gb': local_gb, - 'vcpus_used': self._get_vcpu_used(), - 'memory_mb_used': self._get_memory_mb_used(), - 'local_gb_used': used_gb, + LOG.debug(_('get_available_resource called')) + + (total_mem_mb, + free_mem_mb, + used_mem_mb) = self._get_memory_info() + + (total_hdd_gb, + free_hdd_gb, + used_hdd_gb) = self._get_local_hdd_info_gb() + + cpu_info = self._get_cpu_info() + cpu_topology = cpu_info['topology'] + vcpus = (cpu_topology['sockets'] * + cpu_topology['cores'] * + cpu_topology['threads']) + + dic = {'vcpus': vcpus, + 'memory_mb': total_mem_mb, + 'memory_mb_used': used_mem_mb, + 'local_gb': total_hdd_gb, + 'local_gb_used': used_hdd_gb, 'hypervisor_type': "hyperv", 'hypervisor_version': self._get_hypervisor_version(), 'hypervisor_hostname': platform.node(), - 'cpu_info': self._get_cpu_info()} + 'vcpus_used': 0, + 'cpu_info': jsonutils.dumps(cpu_info)} return dic def _update_stats(self): LOG.debug(_("Updating host stats")) + (total_mem_mb, free_mem_mb, used_mem_mb) = self._get_memory_info() + (total_hdd_gb, + free_hdd_gb, + used_hdd_gb) = self._get_local_hdd_info_gb() + data = {} - data["disk_total"], data["disk_used"] = self._get_local_hdd_info_gb() - data["disk_available"] = data["disk_total"] - data["disk_used"] - data["host_memory_total"] = self._get_memory_mb_total() - data["host_memory_overhead"] = self._get_memory_mb_used() - data["host_memory_free"] = \ - 
data["host_memory_total"] - data["host_memory_overhead"] - data["host_memory_free_computed"] = data["host_memory_free"] - data["supported_instances"] = \ - [('i686', 'hyperv', 'hvm'), - ('x86_64', 'hyperv', 'hvm')] + data["disk_total"] = total_hdd_gb + data["disk_used"] = used_hdd_gb + data["disk_available"] = free_hdd_gb + data["host_memory_total"] = total_mem_mb + data["host_memory_overhead"] = used_mem_mb + data["host_memory_free"] = free_mem_mb + data["host_memory_free_computed"] = free_mem_mb + data["supported_instances"] = [('i686', 'hyperv', 'hvm'), + ('x86_64', 'hyperv', 'hvm')] data["hypervisor_hostname"] = platform.node() self._stats = data diff --git a/nova/virt/hyperv/hostutils.py b/nova/virt/hyperv/hostutils.py new file mode 100644 index 000000000..71f3bc5b2 --- /dev/null +++ b/nova/virt/hyperv/hostutils.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions Srl +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ctypes +import sys + +if sys.platform == 'win32': + import wmi + +from nova.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class HostUtils(object): + def __init__(self): + if sys.platform == 'win32': + self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2') + + def get_cpus_info(self): + cpus = self._conn_cimv2.query("SELECT * FROM Win32_Processor " + "WHERE ProcessorType = 3") + cpus_list = [] + for cpu in cpus: + cpu_info = {'Architecture': cpu.Architecture, + 'Name': cpu.Name, + 'Manufacturer': cpu.Manufacturer, + 'NumberOfCores': cpu.NumberOfCores, + 'NumberOfLogicalProcessors': + cpu.NumberOfLogicalProcessors} + cpus_list.append(cpu_info) + return cpus_list + + def is_cpu_feature_present(self, feature_key): + return ctypes.windll.kernel32.IsProcessorFeaturePresent(feature_key) + + def get_memory_info(self): + """ + Returns a tuple with total visible memory and free physical memory + expressed in kB. + """ + mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, " + "FreePhysicalMemory " + "FROM win32_operatingsystem")[0] + return (long(mem_info.TotalVisibleMemorySize), + long(mem_info.FreePhysicalMemory)) + + def get_volume_info(self, drive): + """ + Returns a tuple with total size and free space + expressed in bytes. + """ + logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace " + "FROM win32_logicaldisk " + "WHERE DeviceID='%s'" + % drive)[0] + return (long(logical_disk.Size), long(logical_disk.FreeSpace)) + + def get_windows_version(self): + return self._conn_cimv2.Win32_OperatingSystem()[0].Version diff --git a/nova/virt/hyperv/ioutils.py b/nova/virt/hyperv/ioutils.py deleted file mode 100644 index d927e317f..000000000 --- a/nova/virt/hyperv/ioutils.py +++ /dev/null @@ -1,26 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility class to ease the task of creating stubs of built in IO functions. -""" - -import __builtin__ - - -def open(name, mode): - return __builtin__.open(name, mode) diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py index 232cbd660..8ee3005f1 100644 --- a/nova/virt/hyperv/livemigrationops.py +++ b/nova/virt/hyperv/livemigrationops.py @@ -19,144 +19,66 @@ Management class for live migration VM operations. 
""" import os -import sys -from nova import exception from nova.openstack.common import cfg from nova.openstack.common import excutils from nova.openstack.common import log as logging -from nova.virt.hyperv import baseops -from nova.virt.hyperv import constants +from nova.virt.hyperv import livemigrationutils +from nova.virt.hyperv import pathutils from nova.virt.hyperv import vmutils - -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import wmi +from nova.virt import images LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('use_cow_images', 'nova.virt.driver') -class LiveMigrationOps(baseops.BaseOps): +class LiveMigrationOps(object): def __init__(self, volumeops): - super(LiveMigrationOps, self).__init__() + self._pathutils = pathutils.PathUtils() self._vmutils = vmutils.VMUtils() + self._livemigrutils = livemigrationutils.LiveMigrationUtils() self._volumeops = volumeops - def _check_live_migration_config(self): - try: - self._conn_v2 - except Exception: - raise vmutils.HyperVException( - _('Live migration is not supported " \ - "by this version of Hyper-V')) - - migration_svc = self._conn_v2.Msvm_VirtualSystemMigrationService()[0] - vsmssd = migration_svc.associators( - wmi_association_class='Msvm_ElementSettingData', - wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')[0] - if not vsmssd.EnableVirtualSystemMigration: - raise vmutils.HyperVException( - _('Live migration is not enabled on this host')) - if not migration_svc.MigrationServiceListenerIPAddressList: - raise vmutils.HyperVException( - _('Live migration networks are not configured on this host')) - def live_migration(self, context, instance_ref, dest, post_method, - recover_method, block_migration=False, migrate_data=None): + recover_method, block_migration=False, + migrate_data=None): LOG.debug(_("live_migration called"), instance=instance_ref) instance_name = instance_ref["name"] try: - self._check_live_migration_config() - - vm_name = 
self._vmutils.lookup(self._conn, instance_name) - if vm_name is None: - raise exception.InstanceNotFound(instance=instance_name) - vm = self._conn_v2.Msvm_ComputerSystem( - ElementName=instance_name)[0] - vm_settings = vm.associators( - wmi_association_class='Msvm_SettingsDefineState', - wmi_result_class='Msvm_VirtualSystemSettingData')[0] - - new_resource_setting_data = [] - sasds = vm_settings.associators( - wmi_association_class='Msvm_VirtualSystemSettingDataComponent', - wmi_result_class='Msvm_StorageAllocationSettingData') - for sasd in sasds: - if sasd.ResourceType == 31 and \ - sasd.ResourceSubType == \ - "Microsoft:Hyper-V:Virtual Hard Disk": - #sasd.PoolId = "" - new_resource_setting_data.append(sasd.GetText_(1)) - - LOG.debug(_("Getting live migration networks for remote " - "host: %s"), dest) - _conn_v2_remote = wmi.WMI( - moniker='//' + dest + '/root/virtualization/v2') - migration_svc_remote = \ - _conn_v2_remote.Msvm_VirtualSystemMigrationService()[0] - remote_ip_address_list = \ - migration_svc_remote.MigrationServiceListenerIPAddressList - - # VirtualSystemAndStorage - vsmsd = self._conn_v2.query("select * from " - "Msvm_VirtualSystemMigrationSettingData " - "where MigrationType = 32771")[0] - vsmsd.DestinationIPAddressList = remote_ip_address_list - migration_setting_data = vsmsd.GetText_(1) - - migration_svc =\ - self._conn_v2.Msvm_VirtualSystemMigrationService()[0] - - LOG.debug(_("Starting live migration for instance: %s"), - instance_name) - (job_path, ret_val) = migration_svc.MigrateVirtualSystemToHost( - ComputerSystem=vm.path_(), - DestinationHost=dest, - MigrationSettingData=migration_setting_data, - NewResourceSettingData=new_resource_setting_data) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job_path) - else: - success = (ret_val == 0) - if not success: - raise vmutils.HyperVException( - _('Failed to live migrate VM %s') % instance_name) + self._livemigrutils.live_migrate_vm(instance_name, 
dest) except Exception: with excutils.save_and_reraise_exception(): LOG.debug(_("Calling live migration recover_method " - "for instance: %s"), instance_name) + "for instance: %s"), instance_name) recover_method(context, instance_ref, dest, block_migration) LOG.debug(_("Calling live migration post_method for instance: %s"), - instance_name) + instance_name) post_method(context, instance_ref, dest, block_migration) def pre_live_migration(self, context, instance, block_device_info, - network_info): + network_info): LOG.debug(_("pre_live_migration called"), instance=instance) - self._check_live_migration_config() + self._livemigrutils.check_live_migration_config() if CONF.use_cow_images: ebs_root = self._volumeops.volume_in_mapping( self._volumeops.get_default_root_device(), block_device_info) if not ebs_root: - base_vhd_path = self._vmutils.get_base_vhd_path( + base_vhd_path = self._pathutils.get_base_vhd_path( instance["image_ref"]) if not os.path.exists(base_vhd_path): - self._vmutils.fetch_image(base_vhd_path, context, - instance["image_ref"], - instance["user_id"], - instance["project_id"]) + images.fetch(context, instance["image_ref"], base_vhd_path, + instance["user_id"], instance["project_id"]) def post_live_migration_at_destination(self, ctxt, instance_ref, - network_info, block_migration): + network_info, block_migration): LOG.debug(_("post_live_migration_at_destination called"), - instance=instance_ref) + instance=instance_ref) def compare_cpu(self, cpu_info): LOG.debug(_("compare_cpu called %s"), cpu_info) diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py new file mode 100644 index 000000000..6af4f0fa5 --- /dev/null +++ b/nova/virt/hyperv/livemigrationutils.py @@ -0,0 +1,115 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions Srl +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +if sys.platform == 'win32': + import wmi + +from nova.openstack.common import log as logging +from nova.virt.hyperv import vmutils + +LOG = logging.getLogger(__name__) + + +class LiveMigrationUtils(object): + + def __init__(self): + self._vmutils = vmutils.VMUtils() + + def _get_conn_v2(self, host='localhost'): + try: + return wmi.WMI(moniker='//%s/root/virtualization/v2' % host) + except wmi.x_wmi as ex: + LOG.exception(ex) + if ex.com_error.hresult == -2147217394: + msg = (_('Live migration is not supported on target host "%s"') + % host) + elif ex.com_error.hresult == -2147023174: + msg = (_('Target live migration host "%s" is unreachable') + % host) + else: + msg = _('Live migration failed: %s') % ex.message + raise vmutils.HyperVException(msg) + + def check_live_migration_config(self): + conn_v2 = self._get_conn_v2() + migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0] + vsmssds = migration_svc.associators( + wmi_association_class='Msvm_ElementSettingData', + wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData') + vsmssd = vsmssds[0] + if not vsmssd.EnableVirtualSystemMigration: + raise vmutils.HyperVException( + _('Live migration is not enabled on this host')) + if not migration_svc.MigrationServiceListenerIPAddressList: + raise vmutils.HyperVException( + _('Live migration networks are not configured on this host')) + + def _get_vm(self, conn_v2, vm_name): + vms = 
conn_v2.Msvm_ComputerSystem(ElementName=vm_name) + n = len(vms) + if not n: + raise vmutils.HyperVException(_('VM not found: %s') % vm_name) + elif n > 1: + raise vmutils.HyperVException(_('Duplicate VM name found: %s') + % vm_name) + return vms[0] + + def live_migrate_vm(self, vm_name, dest_host): + self.check_live_migration_config() + + # We need a v2 namespace VM object + conn_v2_local = self._get_conn_v2() + + vm = self._get_vm(conn_v2_local, vm_name) + vm_settings = vm.associators( + wmi_association_class='Msvm_SettingsDefineState', + wmi_result_class='Msvm_VirtualSystemSettingData')[0] + + new_resource_setting_data = [] + sasds = vm_settings.associators( + wmi_association_class='Msvm_VirtualSystemSettingDataComponent', + wmi_result_class='Msvm_StorageAllocationSettingData') + for sasd in sasds: + if (sasd.ResourceType == 31 and sasd.ResourceSubType == + "Microsoft:Hyper-V:Virtual Hard Disk"): + #sasd.PoolId = "" + new_resource_setting_data.append(sasd.GetText_(1)) + + LOG.debug(_("Getting live migration networks for remote host: %s"), + dest_host) + conn_v2_remote = self._get_conn_v2(dest_host) + migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0] + rmt_ip_addr_list = migr_svc_rmt.MigrationServiceListenerIPAddressList + + # VirtualSystemAndStorage + vsmsd = conn_v2_local.query("select * from " + "Msvm_VirtualSystemMigrationSettingData " + "where MigrationType = 32771")[0] + vsmsd.DestinationIPAddressList = rmt_ip_addr_list + migration_setting_data = vsmsd.GetText_(1) + + migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0] + + LOG.debug(_("Starting live migration for VM: %s"), vm_name) + (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost( + ComputerSystem=vm.path_(), + DestinationHost=dest_host, + MigrationSettingData=migration_setting_data, + NewResourceSettingData=new_resource_setting_data) + self._vmutils.check_ret_val(ret_val, job_path) diff --git a/nova/virt/hyperv/networkutils.py b/nova/virt/hyperv/networkutils.py 
new file mode 100644 index 000000000..4e1f68685 --- /dev/null +++ b/nova/virt/hyperv/networkutils.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Cloudbase Solutions Srl +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utility class for network related operations. +""" + +import sys +import uuid + +if sys.platform == 'win32': + import wmi + +from nova.virt.hyperv import vmutils + + +class NetworkUtils(object): + def __init__(self): + if sys.platform == 'win32': + self._conn = wmi.WMI(moniker='//./root/virtualization') + + def get_external_vswitch(self, vswitch_name): + if vswitch_name: + vswitches = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name) + else: + # Find the vswitch that is connected to the first physical nic. + ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0] + port = ext_port.associators(wmi_result_class='Msvm_SwitchPort')[0] + vswitches = port.associators(wmi_result_class='Msvm_VirtualSwitch') + + if not len(vswitches): + raise vmutils.HyperVException(_('vswitch "%s" not found') + % vswitch_name) + return vswitches[0].path_() + + def create_vswitch_port(self, vswitch_path, port_name): + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + #Create a port on the vswitch. 
+ (new_port, ret_val) = switch_svc.CreateSwitchPort( + Name=str(uuid.uuid4()), + FriendlyName=port_name, + ScopeOfResidence="", + VirtualSwitch=vswitch_path) + if ret_val != 0: + raise vmutils.HyperVException(_("Failed to create vswitch port " + "%(port_name)s on switch " + "%(vswitch_path)s") % locals()) + return new_port diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py new file mode 100644 index 000000000..7bc2e7ac2 --- /dev/null +++ b/nova/virt/hyperv/pathutils.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 Cloudbase Solutions Srl +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import shutil + +from nova.openstack.common import cfg +from nova.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt('instances_path', 'nova.compute.manager') + + +class PathUtils(object): + def open(self, path, mode): + """Wrapper on __builin__.open used to simplify unit testing.""" + import __builtin__ + return __builtin__.open(path, mode) + + def get_instances_path(self): + return os.path.normpath(CONF.instances_path) + + def get_instance_path(self, instance_name): + instance_path = os.path.join(self.get_instances_path(), instance_name) + if not os.path.exists(instance_path): + LOG.debug(_('Creating folder %s '), instance_path) + os.makedirs(instance_path) + return instance_path + + def get_vhd_path(self, instance_name): + instance_path = self.get_instance_path(instance_name) + return os.path.join(instance_path, instance_name + ".vhd") + + def get_base_vhd_path(self, image_name): + base_dir = os.path.join(self.get_instances_path(), '_base') + if not os.path.exists(base_dir): + os.makedirs(base_dir) + return os.path.join(base_dir, image_name + ".vhd") + + def make_export_path(self, instance_name): + export_folder = os.path.join(self.get_instances_path(), "export", + instance_name) + if os.path.isdir(export_folder): + LOG.debug(_('Removing existing folder %s '), export_folder) + shutil.rmtree(export_folder) + LOG.debug(_('Creating folder %s '), export_folder) + os.makedirs(export_folder) + return export_folder + + def vhd_exists(self, path): + return os.path.exists(path) diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py index cdc6e45a4..c43f59b70 100644 --- a/nova/virt/hyperv/snapshotops.py +++ b/nova/virt/hyperv/snapshotops.py @@ -20,173 +20,97 @@ Management class for VM snapshot operations. 
""" import os import shutil -import sys from nova.compute import task_states -from nova import exception from nova.image import glance from nova.openstack.common import cfg from nova.openstack.common import log as logging -from nova.virt.hyperv import baseops -from nova.virt.hyperv import constants -from nova.virt.hyperv import ioutils +from nova.virt.hyperv import pathutils +from nova.virt.hyperv import vhdutils from nova.virt.hyperv import vmutils -from xml.etree import ElementTree - -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import wmi CONF = cfg.CONF LOG = logging.getLogger(__name__) -class SnapshotOps(baseops.BaseOps): +class SnapshotOps(object): def __init__(self): - super(SnapshotOps, self).__init__() + self._pathutils = pathutils.PathUtils() self._vmutils = vmutils.VMUtils() + self._vhdutils = vhdutils.VHDUtils() + + def _save_glance_image(self, context, name, image_vhd_path): + (glance_image_service, + image_id) = glance.get_remote_image_service(context, name) + image_metadata = {"is_public": False, + "disk_format": "vhd", + "container_format": "bare", + "properties": {}} + with self._pathutils.open(image_vhd_path, 'rb') as f: + glance_image_service.update(context, image_id, image_metadata, f) def snapshot(self, context, instance, name, update_task_state): """Create snapshot from a running VM instance.""" instance_name = instance["name"] - vm = self._vmutils.lookup(self._conn, instance_name) - if vm is None: - raise exception.InstanceNotFound(instance=instance_name) - vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0] - vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] LOG.debug(_("Creating snapshot for instance %s"), instance_name) - (job_path, ret_val, snap_setting_data) = \ - vs_man_svc.CreateVirtualSystemSnapshot(vm.path_()) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job_path) - if success: - job_wmi_path = job_path.replace('\\', '/') - job = 
wmi.WMI(moniker=job_wmi_path) - snap_setting_data = job.associators( - wmi_result_class='Msvm_VirtualSystemSettingData')[0] - else: - success = (ret_val == 0) - if not success: - raise vmutils.HyperVException( - _('Failed to create snapshot for VM %s') % - instance_name) - else: - update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) + snapshot_path = self._vmutils.take_vm_snapshot(instance_name) + update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) export_folder = None - f = None try: - src_vhd_path = os.path.join(CONF.instances_path, instance_name, - instance_name + ".vhd") - - image_man_svc = self._conn.Msvm_ImageManagementService()[0] + src_vhd_path = self._pathutils.get_vhd_path(instance_name) LOG.debug(_("Getting info for VHD %s"), src_vhd_path) - (src_vhd_info, job_path, ret_val) = \ - image_man_svc.GetVirtualHardDiskInfo(src_vhd_path) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job_path) - else: - success = (ret_val == 0) - if not success: - raise vmutils.HyperVException( - _("Failed to get info for disk %s") % - (src_vhd_path)) + src_base_disk_path = self._vhdutils.get_vhd_parent_path( + src_vhd_path) - src_base_disk_path = None - et = ElementTree.fromstring(src_vhd_info) - for item in et.findall("PROPERTY"): - if item.attrib["NAME"] == "ParentPath": - src_base_disk_path = item.find("VALUE").text - break - - export_folder = self._vmutils.make_export_path(instance_name) + export_folder = self._pathutils.make_export_path(instance_name) dest_vhd_path = os.path.join(export_folder, os.path.basename( src_vhd_path)) LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'), - locals()) + locals()) shutil.copyfile(src_vhd_path, dest_vhd_path) image_vhd_path = None if not src_base_disk_path: image_vhd_path = dest_vhd_path else: - dest_base_disk_path = os.path.join(export_folder, - os.path.basename(src_base_disk_path)) + basename = os.path.basename(src_base_disk_path) + dest_base_disk_path = 
os.path.join(export_folder, basename) LOG.debug(_('Copying base disk %(src_vhd_path)s to ' - '%(dest_base_disk_path)s'), locals()) + '%(dest_base_disk_path)s'), locals()) shutil.copyfile(src_base_disk_path, dest_base_disk_path) LOG.debug(_("Reconnecting copied base VHD " - "%(dest_base_disk_path)s and diff VHD %(dest_vhd_path)s"), - locals()) - (job_path, ret_val) = \ - image_man_svc.ReconnectParentVirtualHardDisk( - ChildPath=dest_vhd_path, - ParentPath=dest_base_disk_path, - Force=True) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job_path) - else: - success = (ret_val == 0) - if not success: - raise vmutils.HyperVException( - _("Failed to reconnect base disk " - "%(dest_base_disk_path)s and diff disk " - "%(dest_vhd_path)s") % - locals()) + "%(dest_base_disk_path)s and diff " + "VHD %(dest_vhd_path)s"), locals()) + self._vhdutils.reconnect_parent_vhd(dest_vhd_path, + dest_base_disk_path) LOG.debug(_("Merging base disk %(dest_base_disk_path)s and " - "diff disk %(dest_vhd_path)s"), - locals()) - (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk( - SourcePath=dest_vhd_path, - DestinationPath=dest_base_disk_path) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job_path) - else: - success = (ret_val == 0) - if not success: - raise vmutils.HyperVException( - _("Failed to merge base disk %(dest_base_disk_path)s " - "and diff disk %(dest_vhd_path)s") % - locals()) + "diff disk %(dest_vhd_path)s"), locals()) + self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path) image_vhd_path = dest_base_disk_path - (glance_image_service, image_id) = \ - glance.get_remote_image_service(context, name) - image_metadata = {"is_public": False, - "disk_format": "vhd", - "container_format": "bare", - "properties": {}} - f = ioutils.open(image_vhd_path, 'rb') - LOG.debug( - _("Updating Glance image %(image_id)s with content from " - "merged disk %(image_vhd_path)s"), - locals()) + 
LOG.debug(_("Updating Glance image %(image_id)s with content from " + "merged disk %(image_vhd_path)s"), locals()) update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) - glance_image_service.update(context, image_id, image_metadata, f) + self._save_glance_image(context, name, image_vhd_path) LOG.debug(_("Snapshot image %(image_id)s updated for VM " - "%(instance_name)s"), locals()) + "%(instance_name)s"), locals()) finally: - LOG.debug(_("Removing snapshot %s"), name) - (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot( - snap_setting_data.path_()) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job_path) - else: - success = (ret_val == 0) - if not success: - raise vmutils.HyperVException( - _('Failed to remove snapshot for VM %s') % - instance_name) - if f: - f.close() + try: + LOG.debug(_("Removing snapshot %s"), name) + self._vmutils.remove_vm_snapshot(snapshot_path) + except Exception as ex: + LOG.exception(ex) + LOG.warning(_('Failed to remove snapshot for VM %s') + % instance_name) if export_folder: LOG.debug(_('Removing folder %s '), export_folder) shutil.rmtree(export_folder) diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py new file mode 100644 index 000000000..21c4b4a6d --- /dev/null +++ b/nova/virt/hyperv/vhdutils.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Cloudbase Solutions Srl +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +if sys.platform == 'win32': + import wmi + +from nova.virt.hyperv import vmutils +from xml.etree import ElementTree + + +class VHDUtils(object): + + def __init__(self): + self._vmutils = vmutils.VMUtils() + if sys.platform == 'win32': + self._conn = wmi.WMI(moniker='//./root/virtualization') + + def create_differencing_vhd(self, path, parent_path): + image_man_svc = self._conn.Msvm_ImageManagementService()[0] + + (job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk( + Path=path, ParentPath=parent_path) + self._vmutils.check_ret_val(ret_val, job_path) + + def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path): + image_man_svc = self._conn.Msvm_ImageManagementService()[0] + + (job_path, ret_val) = image_man_svc.ReconnectParentVirtualHardDisk( + ChildPath=child_vhd_path, + ParentPath=parent_vhd_path, + Force=True) + self._vmutils.check_ret_val(ret_val, job_path) + + def merge_vhd(self, src_vhd_path, dest_vhd_path): + image_man_svc = self._conn.Msvm_ImageManagementService()[0] + + (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk( + SourcePath=src_vhd_path, + DestinationPath=dest_vhd_path) + self._vmutils.check_ret_val(ret_val, job_path) + + def get_vhd_parent_path(self, vhd_path): + image_man_svc = self._conn.Msvm_ImageManagementService()[0] + + (vhd_info, + job_path, + ret_val) = image_man_svc.GetVirtualHardDiskInfo(vhd_path) + self._vmutils.check_ret_val(ret_val, job_path) + + base_disk_path = None + et = ElementTree.fromstring(vhd_info) + for item in et.findall("PROPERTY"): + if item.attrib["NAME"] == "ParentPath": + base_disk_path = item.find("VALUE").text + break + return base_disk_path diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py index e01006eaa..cfe7c6a4c 100644 --- a/nova/virt/hyperv/vif.py +++ b/nova/virt/hyperv/vif.py @@ -15,17 +15,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import abc -import sys -import uuid - -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import wmi +import abc from nova.openstack.common import cfg from nova.openstack.common import log as logging +from nova.virt.hyperv import networkutils from nova.virt.hyperv import vmutils @@ -70,65 +65,17 @@ class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver): def __init__(self): self._vmutils = vmutils.VMUtils() - self._conn = wmi.WMI(moniker='//./root/virtualization') - - def _find_external_network(self): - """Find the vswitch that is connected to the physical nic. - Assumes only one physical nic on the host - """ - #If there are no physical nics connected to networks, return. - LOG.debug(_("Attempting to bind NIC to %s ") - % CONF.vswitch_name) - if CONF.vswitch_name: - LOG.debug(_("Attempting to bind NIC to %s ") - % CONF.vswitch_name) - bound = self._conn.Msvm_VirtualSwitch( - ElementName=CONF.vswitch_name) - else: - LOG.debug(_("No vSwitch specified, attaching to default")) - self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') - if len(bound) == 0: - return None - if CONF.vswitch_name: - return self._conn.Msvm_VirtualSwitch( - ElementName=CONF.vswitch_name)[0]\ - .associators(wmi_result_class='Msvm_SwitchPort')[0]\ - .associators(wmi_result_class='Msvm_VirtualSwitch')[0] - else: - return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\ - .associators(wmi_result_class='Msvm_SwitchPort')[0]\ - .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + self._netutils = networkutils.NetworkUtils() def plug(self, instance, vif): - extswitch = self._find_external_network() - if extswitch is None: - raise vmutils.HyperVException(_('Cannot find vSwitch')) + vswitch_path = self._netutils.get_external_vswitch( + CONF.vswitch_name) vm_name = instance['name'] - - nic_data = self._conn.Msvm_SyntheticEthernetPortSettingData( - 
ElementName=vif['id'])[0] - - switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] - #Create a port on the vswitch. - (new_port, ret_val) = switch_svc.CreateSwitchPort( - Name=str(uuid.uuid4()), - FriendlyName=vm_name, - ScopeOfResidence="", - VirtualSwitch=extswitch.path_()) - if ret_val != 0: - LOG.error(_('Failed creating a port on the external vswitch')) - raise vmutils.HyperVException(_('Failed creating port for %s') % - vm_name) - ext_path = extswitch.path_() - LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s") - % locals()) - - vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) - vm = vms[0] - - nic_data.Connection = [new_port] - self._vmutils.modify_virt_resource(self._conn, nic_data, vm) + LOG.debug(_('Creating vswitch port for instance: %s') % vm_name) + vswitch_port = self._netutils.create_vswitch_port(vswitch_path, + vm_name) + self._vmutils.set_nic_connection(vm_name, vif['id'], vswitch_port) def unplug(self, instance, vif): #TODO(alepilotti) Not implemented diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 3d8958266..8ce1d508b 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -19,7 +19,6 @@ Management class for basic VM operations. 
""" import os -import uuid from nova.api.metadata import base as instance_metadata from nova import exception @@ -29,29 +28,31 @@ from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova import utils from nova.virt import configdrive -from nova.virt.hyperv import baseops from nova.virt.hyperv import constants +from nova.virt.hyperv import pathutils +from nova.virt.hyperv import vhdutils from nova.virt.hyperv import vmutils +from nova.virt import images LOG = logging.getLogger(__name__) hyperv_opts = [ cfg.BoolOpt('limit_cpu_features', - default=False, - help='Required for live migration among ' - 'hosts with different CPU features'), + default=False, + help='Required for live migration among ' + 'hosts with different CPU features'), cfg.BoolOpt('config_drive_inject_password', - default=False, - help='Sets the admin password in the config drive image'), + default=False, + help='Sets the admin password in the config drive image'), cfg.StrOpt('qemu_img_cmd', default="qemu-img.exe", help='qemu-img is used to convert between ' 'different image types'), cfg.BoolOpt('config_drive_cdrom', - default=False, - help='Attaches the Config Drive image as a cdrom drive ' - 'instead of a disk drive') - ] + default=False, + help='Attaches the Config Drive image as a cdrom drive ' + 'instead of a disk drive') +] CONF = cfg.CONF CONF.register_opts(hyperv_opts) @@ -59,19 +60,20 @@ CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('network_api_class', 'nova.network') -class VMOps(baseops.BaseOps): +class VMOps(object): _vif_driver_class_map = { 'nova.network.quantumv2.api.API': - 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver', + 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver', 'nova.network.api.API': - 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver', + 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver', } def __init__(self, volumeops): - super(VMOps, self).__init__() - self._vmutils = vmutils.VMUtils() + self._vhdutils = 
vhdutils.VHDUtils() + self._pathutils = pathutils.PathUtils() self._volumeops = volumeops + self._vif_driver = None self._load_vif_driver_class() def _load_vif_driver_class(self): @@ -84,124 +86,106 @@ class VMOps(baseops.BaseOps): CONF.network_api_class) def list_instances(self): - """Return the names of all the instances known to Hyper-V.""" - vms = [v.ElementName - for v in self._conn.Msvm_ComputerSystem(['ElementName'], - Caption="Virtual Machine")] - return vms + return self._vmutils.list_instances() def get_info(self, instance): """Get information about the VM.""" LOG.debug(_("get_info called for instance"), instance=instance) - return self._get_info(instance['name']) - - def _get_info(self, instance_name): - vm = self._vmutils.lookup(self._conn, instance_name) - if vm is None: - raise exception.InstanceNotFound(instance=instance_name) - vm = self._conn.Msvm_ComputerSystem( - ElementName=instance_name)[0] - vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - vmsettings = vm.associators( - wmi_association_class='Msvm_SettingsDefineState', - wmi_result_class='Msvm_VirtualSystemSettingData') - settings_paths = [v.path_() for v in vmsettings] - #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx - summary_info = vs_man_svc.GetSummaryInformation( - [constants.VM_SUMMARY_NUM_PROCS, - constants.VM_SUMMARY_ENABLED_STATE, - constants.VM_SUMMARY_MEMORY_USAGE, - constants.VM_SUMMARY_UPTIME], - settings_paths)[1] - info = summary_info[0] - - LOG.debug(_("hyperv vm state: %s"), info.EnabledState) - state = constants.HYPERV_POWER_STATE[info.EnabledState] - memusage = str(info.MemoryUsage) - numprocs = str(info.NumberOfProcessors) - uptime = str(info.UpTime) - - LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)d," - " mem=%(memusage)s, num_cpu=%(numprocs)s," - " uptime=%(uptime)s"), locals()) + instance_name = instance['name'] + if not self._vmutils.vm_exists(instance_name): + raise exception.InstanceNotFound(instance=instance) 
+ + info = self._vmutils.get_vm_summary_info(instance_name) + + state = constants.HYPERV_POWER_STATE[info['EnabledState']] return {'state': state, - 'max_mem': info.MemoryUsage, - 'mem': info.MemoryUsage, - 'num_cpu': info.NumberOfProcessors, - 'cpu_time': info.UpTime} + 'max_mem': info['MemoryUsage'], + 'mem': info['MemoryUsage'], + 'num_cpu': info['NumberOfProcessors'], + 'cpu_time': info['UpTime']} def spawn(self, context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info=None): + admin_password, network_info, block_device_info=None): """Create a new VM and start it.""" - vm = self._vmutils.lookup(self._conn, instance['name']) - if vm is not None: - raise exception.InstanceExists(name=instance['name']) + + instance_name = instance['name'] + if self._vmutils.vm_exists(instance_name): + raise exception.InstanceExists(name=instance_name) ebs_root = self._volumeops.volume_in_mapping( self._volumeops.get_default_root_device(), - block_device_info) + block_device_info) #If is not a boot from volume spawn if not (ebs_root): #Fetch the file, assume it is a VHD file. 
- vhdfile = self._vmutils.get_vhd_path(instance['name']) + vhdfile = self._pathutils.get_vhd_path(instance_name) try: - self._cache_image(fn=self._vmutils.fetch_image, - context=context, - target=vhdfile, - fname=instance['image_ref'], - image_id=instance['image_ref'], - user=instance['user_id'], - project=instance['project_id'], - cow=CONF.use_cow_images) + self._cache_image(fn=self._fetch_image, + context=context, + target=vhdfile, + fname=instance['image_ref'], + image_id=instance['image_ref'], + user=instance['user_id'], + project=instance['project_id'], + cow=CONF.use_cow_images) except Exception as exn: LOG.exception(_('cache image failed: %s'), exn) - self.destroy(instance) + raise try: - self._create_vm(instance) + self._vmutils.create_vm(instance_name, + instance['memory_mb'], + instance['vcpus'], + CONF.limit_cpu_features) if not ebs_root: - self._attach_ide_drive(instance['name'], vhdfile, 0, 0, - constants.IDE_DISK) + self._vmutils.attach_ide_drive(instance_name, + vhdfile, + 0, + 0, + constants.IDE_DISK) else: self._volumeops.attach_boot_volume(block_device_info, - instance['name']) + instance_name) - #A SCSI controller for volumes connection is created - self._create_scsi_controller(instance['name']) + self._vmutils.create_scsi_controller(instance_name) for vif in network_info: - self._create_nic(instance['name'], vif) + LOG.debug(_('Creating nic for instance: %s'), instance_name) + self._vmutils.create_nic(instance_name, + vif['id'], + vif['address']) self._vif_driver.plug(instance, vif) if configdrive.required_by(instance): self._create_config_drive(instance, injected_files, - admin_password) + admin_password) - LOG.debug(_('Starting VM %s '), instance['name']) - self._set_vm_state(instance['name'], 'Enabled') - LOG.info(_('Started VM %s '), instance['name']) - except Exception as exn: - LOG.exception(_('spawn vm failed: %s'), exn) + self._set_vm_state(instance_name, + constants.HYPERV_VM_STATE_ENABLED) + except Exception as ex: + LOG.exception(ex) 
self.destroy(instance) - raise exn + raise vmutils.HyperVException(_('Spawn instance failed')) def _create_config_drive(self, instance, injected_files, admin_password): if CONF.config_drive_format != 'iso9660': vmutils.HyperVException(_('Invalid config_drive_format "%s"') % - CONF.config_drive_format) + CONF.config_drive_format) + + LOG.info(_('Using config drive for instance: %s'), instance=instance) - LOG.info(_('Using config drive'), instance=instance) extra_md = {} if admin_password and CONF.config_drive_inject_password: extra_md['admin_pass'] = admin_password inst_md = instance_metadata.InstanceMetadata(instance, - content=injected_files, extra_md=extra_md) + content=injected_files, + extra_md=extra_md) - instance_path = self._vmutils.get_instance_path( + instance_path = self._pathutils.get_instance_path( instance['name']) configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso') LOG.info(_('Creating config drive at %(path)s'), @@ -218,7 +202,7 @@ class VMOps(baseops.BaseOps): if not CONF.config_drive_cdrom: drive_type = constants.IDE_DISK configdrive_path = os.path.join(instance_path, - 'configdrive.vhd') + 'configdrive.vhd') utils.execute(CONF.qemu_img_cmd, 'convert', '-f', @@ -233,302 +217,88 @@ class VMOps(baseops.BaseOps): drive_type = constants.IDE_DVD configdrive_path = configdrive_path_iso - self._attach_ide_drive(instance['name'], configdrive_path, 1, 0, - drive_type) - - def _create_vm(self, instance): - """Create a VM but don't start it.""" - vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + self._vmutils.attach_ide_drive(instance['name'], configdrive_path, + 1, 0, drive_type) - vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() - vs_gs_data.ElementName = instance["name"] - (job, ret_val) = vs_man_svc.DefineVirtualSystem( - [], None, vs_gs_data.GetText_(1))[1:] - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job) - else: - success = (ret_val == 0) - - if not 
success: - raise vmutils.HyperVException(_('Failed to create VM %s') % - instance["name"]) - - LOG.debug(_('Created VM %s...'), instance["name"]) - vm = self._conn.Msvm_ComputerSystem(ElementName=instance["name"])[0] - - vmsettings = vm.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') - vmsetting = [s for s in vmsettings - if s.SettingType == 3][0] # avoid snapshots - memsetting = vmsetting.associators( - wmi_result_class='Msvm_MemorySettingData')[0] - #No Dynamic Memory, so reservation, limit and quantity are identical. - mem = long(str(instance['memory_mb'])) - memsetting.VirtualQuantity = mem - memsetting.Reservation = mem - memsetting.Limit = mem - - (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( - vm.path_(), [memsetting.GetText_(1)]) - LOG.debug(_('Set memory for vm %s...'), instance["name"]) - procsetting = vmsetting.associators( - wmi_result_class='Msvm_ProcessorSettingData')[0] - vcpus = long(instance['vcpus']) - procsetting.VirtualQuantity = vcpus - procsetting.Reservation = vcpus - procsetting.Limit = 100000 # static assignment to 100% - - if CONF.limit_cpu_features: - procsetting.LimitProcessorFeatures = True - - (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( - vm.path_(), [procsetting.GetText_(1)]) - LOG.debug(_('Set vcpus for vm %s...'), instance["name"]) - - def _create_scsi_controller(self, vm_name): - """Create an iscsi controller ready to mount volumes.""" - LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume ' - 'attaching') % locals()) - vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) - vm = vms[0] - scsicontrldefault = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\ - AND InstanceID LIKE '%Default%'")[0] - if scsicontrldefault is None: - raise vmutils.HyperVException(_('Controller not found')) - scsicontrl = self._vmutils.clone_wmi_obj(self._conn, - 'Msvm_ResourceAllocationSettingData', 
scsicontrldefault) - scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] - scsiresource = self._vmutils.add_virt_resource(self._conn, - scsicontrl, vm) - if scsiresource is None: - raise vmutils.HyperVException( - _('Failed to add scsi controller to VM %s') % - vm_name) - - def _get_ide_controller(self, vm, ctrller_addr): - #Find the IDE controller for the vm. - vmsettings = vm.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') - rasds = vmsettings[0].associators( - wmi_result_class='MSVM_ResourceAllocationSettingData') - ctrller = [r for r in rasds - if r.ResourceSubType == 'Microsoft Emulated IDE Controller' - and r.Address == str(ctrller_addr)] - return ctrller - - def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, - drive_type=constants.IDE_DISK): - """Create an IDE drive and attach it to the vm.""" - LOG.debug(_('Creating disk for %(vm_name)s by attaching' - ' disk file %(path)s') % locals()) - - vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) - vm = vms[0] - - ctrller = self._get_ide_controller(vm, ctrller_addr) - - if drive_type == constants.IDE_DISK: - resSubType = 'Microsoft Synthetic Disk Drive' - elif drive_type == constants.IDE_DVD: - resSubType = 'Microsoft Synthetic DVD Drive' - - #Find the default disk drive object for the vm and clone it. - drivedflt = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType LIKE '%(resSubType)s'\ - AND InstanceID LIKE '%%Default%%'" % locals())[0] - drive = self._vmutils.clone_wmi_obj(self._conn, - 'Msvm_ResourceAllocationSettingData', drivedflt) - #Set the IDE ctrller as parent. - drive.Parent = ctrller[0].path_() - drive.Address = drive_addr - #Add the cloned disk drive object to the vm. 
- new_resources = self._vmutils.add_virt_resource(self._conn, - drive, vm) - if new_resources is None: - raise vmutils.HyperVException( - _('Failed to add drive to VM %s') % - vm_name) - drive_path = new_resources[0] - LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') % - locals()) - - if drive_type == constants.IDE_DISK: - resSubType = 'Microsoft Virtual Hard Disk' - elif drive_type == constants.IDE_DVD: - resSubType = 'Microsoft Virtual CD/DVD Disk' - - #Find the default VHD disk object. - drivedefault = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType LIKE '%(resSubType)s' AND \ - InstanceID LIKE '%%Default%%' " % locals())[0] - - #Clone the default and point it to the image file. - res = self._vmutils.clone_wmi_obj(self._conn, - 'Msvm_ResourceAllocationSettingData', drivedefault) - #Set the new drive as the parent. - res.Parent = drive_path - res.Connection = [path] - - #Add the new vhd object as a virtual hard disk to the vm. 
- new_resources = self._vmutils.add_virt_resource(self._conn, res, vm) - if new_resources is None: - raise vmutils.HyperVException( - _('Failed to add %(drive_type)s image to VM %(vm_name)s') % - locals()) - LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') % - locals()) - - def _create_nic(self, vm_name, vif): - """Create a (synthetic) nic and attach it to the vm.""" - LOG.debug(_('Creating nic for %s '), vm_name) - - #Create a new nic - syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData() - default_nic_data = [n for n in syntheticnics_data - if n.InstanceID.rfind('Default') > 0] - new_nic_data = self._vmutils.clone_wmi_obj(self._conn, - 'Msvm_SyntheticEthernetPortSettingData', - default_nic_data[0]) - - #Configure the nic - new_nic_data.ElementName = vif['id'] - new_nic_data.Address = vif['address'].replace(':', '') - new_nic_data.StaticMacAddress = 'True' - new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] - - #Add the new nic to the vm - vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) - vm = vms[0] - - new_resources = self._vmutils.add_virt_resource(self._conn, - new_nic_data, vm) - if new_resources is None: - raise vmutils.HyperVException(_('Failed to add nic to VM %s') % - vm_name) - LOG.info(_("Created nic for %s "), vm_name) + def destroy(self, instance, network_info=None, cleanup=True, + destroy_disks=True): + instance_name = instance['name'] + LOG.debug(_("Got request to destroy instance: %s"), instance_name) + try: + if self._vmutils.vm_exists(instance_name): + volumes_drives_list = self._vmutils.destroy_vm(instance_name, + destroy_disks) + #Disconnect volumes + for volume_drive in volumes_drives_list: + self._volumeops.disconnect_volume(volume_drive) + else: + LOG.debug(_("Instance not found: %s"), instance_name) + except Exception as ex: + LOG.exception(ex) + raise vmutils.HyperVException(_('Failed to destroy instance: %s') % + instance_name) def reboot(self, instance, network_info, 
reboot_type): """Reboot the specified instance.""" - vm = self._vmutils.lookup(self._conn, instance['name']) - if vm is None: - raise exception.InstanceNotFound(instance_id=instance["id"]) - self._set_vm_state(instance['name'], 'Reboot') - - def destroy(self, instance, network_info=None, cleanup=True, - destroy_disks=True): - """Destroy the VM. Also destroy the associated VHD disk files.""" - LOG.debug(_("Got request to destroy vm %s"), instance['name']) - vm = self._vmutils.lookup(self._conn, instance['name']) - if vm is None: - return - vm = self._conn.Msvm_ComputerSystem(ElementName=instance['name'])[0] - vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - #Stop the VM first. - self._set_vm_state(instance['name'], 'Disabled') - vmsettings = vm.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') - rasds = vmsettings[0].associators( - wmi_result_class='MSVM_ResourceAllocationSettingData') - disks = [r for r in rasds - if r.ResourceSubType == 'Microsoft Virtual Hard Disk'] - disk_files = [] - volumes = [r for r in rasds - if r.ResourceSubType == 'Microsoft Physical Disk Drive'] - volumes_drives_list = [] - #collect the volumes information before destroying the VM. - for volume in volumes: - hostResources = volume.HostResource - drive_path = hostResources[0] - #Appending the Msvm_Disk path - volumes_drives_list.append(drive_path) - #Collect disk file information before destroying the VM. - for disk in disks: - disk_files.extend([c for c in disk.Connection]) - #Nuke the VM. Does not destroy disks. 
- (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job) - elif ret_val == 0: - success = True - if not success: - raise vmutils.HyperVException(_('Failed to destroy vm %s') % - instance['name']) - if destroy_disks: - #Disconnect volumes - for volume_drive in volumes_drives_list: - self._volumeops.disconnect_volume(volume_drive) - #Delete associated vhd disk files. - for disk in disk_files: - vhdfile = self._conn_cimv2.query( - "Select * from CIM_DataFile where Name = '" + - disk.replace("'", "''") + "'")[0] - LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s") - % {'vhdfile': vhdfile, 'name': instance['name']}) - vhdfile.Delete() + LOG.debug(_("reboot instance"), instance=instance) + self._set_vm_state(instance['name'], + constants.HYPERV_VM_STATE_REBOOT) def pause(self, instance): """Pause VM instance.""" LOG.debug(_("Pause instance"), instance=instance) - self._set_vm_state(instance["name"], 'Paused') + self._set_vm_state(instance["name"], + constants.HYPERV_VM_STATE_PAUSED) def unpause(self, instance): """Unpause paused VM instance.""" LOG.debug(_("Unpause instance"), instance=instance) - self._set_vm_state(instance["name"], 'Enabled') + self._set_vm_state(instance["name"], + constants.HYPERV_VM_STATE_ENABLED) def suspend(self, instance): """Suspend the specified instance.""" print instance LOG.debug(_("Suspend instance"), instance=instance) - self._set_vm_state(instance["name"], 'Suspended') + self._set_vm_state(instance["name"], + constants.HYPERV_VM_STATE_SUSPENDED) def resume(self, instance): """Resume the suspended VM instance.""" LOG.debug(_("Resume instance"), instance=instance) - self._set_vm_state(instance["name"], 'Enabled') + self._set_vm_state(instance["name"], + constants.HYPERV_VM_STATE_ENABLED) def power_off(self, instance): """Power off the specified instance.""" LOG.debug(_("Power off instance"), instance=instance) - 
self._set_vm_state(instance["name"], 'Disabled') + self._set_vm_state(instance["name"], + constants.HYPERV_VM_STATE_DISABLED) def power_on(self, instance): """Power on the specified instance.""" LOG.debug(_("Power on instance"), instance=instance) - self._set_vm_state(instance["name"], 'Enabled') + self._set_vm_state(instance["name"], + constants.HYPERV_VM_STATE_ENABLED) def _set_vm_state(self, vm_name, req_state): - """Set the desired state of the VM.""" - vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) - if len(vms) == 0: - return False - (job, ret_val) = vms[0].RequestStateChange( - constants.REQ_POWER_STATE[req_state]) - success = False - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job) - elif ret_val == 0: - success = True - elif ret_val == 32775: - #Invalid state for current operation. Typically means it is - #already in the state requested - success = True - if success: - LOG.info(_("Successfully changed vm state of %(vm_name)s" - " to %(req_state)s") % locals()) - else: + try: + self._vmutils.set_vm_state(vm_name, req_state) + LOG.debug(_("Successfully changed state of VM %(vm_name)s" + " to: %(req_state)s") % locals()) + except Exception as ex: + LOG.exception(ex) msg = _("Failed to change vm state of %(vm_name)s" " to %(req_state)s") % locals() - LOG.error(msg) raise vmutils.HyperVException(msg) - def _cache_image(self, fn, target, fname, cow=False, Size=None, - *args, **kwargs): - """Wrapper for a method that creates an image that caches the image. + def _fetch_image(self, target, context, image_id, user, project, + *args, **kwargs): + images.fetch(context, image_id, target, user, project) + + def _cache_image(self, fn, target, fname, cow=False, size=None, + *args, **kwargs): + """Wrapper for a method that creates and caches an image. This wrapper will save the image into a common store and create a copy for use by the hypervisor. 
@@ -543,32 +313,23 @@ class VMOps(baseops.BaseOps): """ @lockutils.synchronized(fname, 'nova-') def call_if_not_exists(path, fn, *args, **kwargs): - if not os.path.exists(path): - fn(target=path, *args, **kwargs) + if not os.path.exists(path): + fn(target=path, *args, **kwargs) - if not os.path.exists(target): - LOG.debug(_("use_cow_image:%s"), cow) + if not self._pathutils.vhd_exists(target): + LOG.debug(_("Use CoW image: %s"), cow) if cow: - base = self._vmutils.get_base_vhd_path(fname) - call_if_not_exists(base, fn, *args, **kwargs) - - image_service = self._conn.query( - "Select * from Msvm_ImageManagementService")[0] - (job, ret_val) = \ - image_service.CreateDifferencingVirtualHardDisk( - Path=target, ParentPath=base) - LOG.debug( - "Creating difference disk: JobID=%s, Source=%s, Target=%s", - job, base, target) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self._vmutils.check_job_status(job) - else: - success = (ret_val == 0) - - if not success: + parent_path = self._pathutils.get_base_vhd_path(fname) + call_if_not_exists(parent_path, fn, *args, **kwargs) + + LOG.debug(_("Creating differencing VHD. Parent: " + "%(parent_path)s, Target: %(target)s") % locals()) + try: + self._vhdutils.create_differencing_vhd(target, parent_path) + except Exception as ex: + LOG.exception(ex) raise vmutils.HyperVException( - _('Failed to create Difference Disk from ' - '%(base)s to %(target)s') % locals()) - + _('Failed to create a differencing disk from ' + '%(parent_path)s to %(target)s') % locals()) else: call_if_not_exists(target, fn, *args, **kwargs) diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py index d899f977d..0305d8306 100644 --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -16,24 +16,20 @@ # under the License. """ -Utility class for VM related operations. +Utility class for VM related operations on Hyper-V. 
""" -import os -import shutil import sys import time import uuid +if sys.platform == 'win32': + import wmi + from nova import exception from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova.virt.hyperv import constants -from nova.virt import images - -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import wmi CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -45,19 +41,342 @@ class HyperVException(exception.NovaException): class VMUtils(object): - def lookup(self, conn, i): - vms = conn.Msvm_ComputerSystem(ElementName=i) + + def __init__(self): + if sys.platform == 'win32': + self._conn = wmi.WMI(moniker='//./root/virtualization') + self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2') + + def list_instances(self): + """Return the names of all the instances known to Hyper-V.""" + vm_names = [v.ElementName + for v in self._conn.Msvm_ComputerSystem(['ElementName'], + Caption="Virtual Machine")] + return vm_names + + def get_vm_summary_info(self, vm_name): + vm = self._lookup_vm_check(vm_name) + + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = vm.associators( + wmi_association_class='Msvm_SettingsDefineState', + wmi_result_class='Msvm_VirtualSystemSettingData') + settings_paths = [v.path_() for v in vmsettings] + #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + (ret_val, summary_info) = vs_man_svc.GetSummaryInformation( + [constants.VM_SUMMARY_NUM_PROCS, + constants.VM_SUMMARY_ENABLED_STATE, + constants.VM_SUMMARY_MEMORY_USAGE, + constants.VM_SUMMARY_UPTIME], + settings_paths) + if ret_val: + raise HyperVException(_('Cannot get VM summary data for: %s') + % vm_name) + + si = summary_info[0] + memory_usage = None + if si.MemoryUsage is not None: + memory_usage = long(si.MemoryUsage) + up_time = None + if si.UpTime is not None: + up_time = long(si.UpTime) + + summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors, + 'EnabledState': 
si.EnabledState, + 'MemoryUsage': memory_usage, + 'UpTime': up_time} + return summary_info_dict + + def _lookup_vm_check(self, vm_name): + vm = self._lookup_vm(vm_name) + if not vm: + raise HyperVException(_('VM not found: %s') % vm_name) + return vm + + def _lookup_vm(self, vm_name): + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) n = len(vms) if n == 0: return None elif n > 1: - raise HyperVException(_('duplicate name found: %s') % i) + raise HyperVException(_('Duplicate VM name found: %s') % vm_name) else: - return vms[0].ElementName + return vms[0] + + def vm_exists(self, vm_name): + return self._lookup_vm(vm_name) is not None + + def _get_vm_setting_data(self, vm): + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + # Avoid snapshots + return [s for s in vmsettings if s.SettingType == 3][0] + + def _set_vm_memory(self, vm, vmsetting, memory_mb): + memsetting = vmsetting.associators( + wmi_result_class='Msvm_MemorySettingData')[0] + #No Dynamic Memory, so reservation, limit and quantity are identical. 
+ mem = long(memory_mb) + memsetting.VirtualQuantity = mem + memsetting.Reservation = mem + memsetting.Limit = mem + + self._modify_virt_resource(memsetting, vm.path_()) + + def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features): + procsetting = vmsetting.associators( + wmi_result_class='Msvm_ProcessorSettingData')[0] + vcpus = long(vcpus_num) + procsetting.VirtualQuantity = vcpus + procsetting.Reservation = vcpus + procsetting.Limit = 100000 # static assignment to 100% + procsetting.LimitProcessorFeatures = limit_cpu_features + + self._modify_virt_resource(procsetting, vm.path_()) + + def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features): + """Creates a VM.""" + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + + vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() + vs_gs_data.ElementName = vm_name + + LOG.debug(_('Creating VM %s'), vm_name) + (job_path, + ret_val) = vs_man_svc.DefineVirtualSystem([], None, + vs_gs_data.GetText_(1))[1:] + self.check_ret_val(ret_val, job_path) + + vm = self._lookup_vm_check(vm_name) + vmsetting = self._get_vm_setting_data(vm) + + LOG.debug(_('Setting memory for vm %s'), vm_name) + self._set_vm_memory(vm, vmsetting, memory_mb) + + LOG.debug(_('Set vCPUs for vm %s'), vm_name) + self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features) + + def get_vm_iscsi_controller(self, vm_name): + vm = self._lookup_vm_check(vm_name) + + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + res = [r for r in rasds + if r.ResourceSubType == + 'Microsoft Synthetic SCSI Controller'][0] + return res.path_() + + def _get_vm_ide_controller(self, vm, ctrller_addr): + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + return [r for r in rasds 
+ if r.ResourceSubType == 'Microsoft Emulated IDE Controller' + and r.Address == str(ctrller_addr)][0].path_() + + def get_vm_ide_controller(self, vm_name, ctrller_addr): + vm = self._lookup_vm_check(vm_name) + return self._get_vm_ide_controller(vm, ctrller_addr) + + def get_attached_disks_count(self, scsi_controller_path): + volumes = self._conn.query("SELECT * FROM " + "Msvm_ResourceAllocationSettingData " + "WHERE ResourceSubType LIKE " + "'Microsoft Physical Disk Drive' " + "AND Parent = '%s'" % + scsi_controller_path.replace("'", "''")) + return len(volumes) + + def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, + drive_type=constants.IDE_DISK): + """Create an IDE drive and attach it to the vm.""" + + vm = self._lookup_vm_check(vm_name) + + ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr) + + if drive_type == constants.IDE_DISK: + res_sub_type = 'Microsoft Synthetic Disk Drive' + elif drive_type == constants.IDE_DVD: + res_sub_type = 'Microsoft Synthetic DVD Drive' + + #Find the default disk drive object for the vm and clone it. + drivedflt = self._conn.query("SELECT * FROM " + "Msvm_ResourceAllocationSettingData " + "WHERE ResourceSubType LIKE " + "'%(res_sub_type)s' AND InstanceID LIKE " + "'%%Default%%'" % locals())[0] + drive = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData', + drivedflt) + #Set the IDE ctrller as parent. + drive.Parent = ctrller_path + drive.Address = drive_addr + #Add the cloned disk drive object to the vm. + new_resources = self._add_virt_resource(drive, vm.path_()) + drive_path = new_resources[0] + + if drive_type == constants.IDE_DISK: + res_sub_type = 'Microsoft Virtual Hard Disk' + elif drive_type == constants.IDE_DVD: + res_sub_type = 'Microsoft Virtual CD/DVD Disk' + + #Find the default VHD disk object. 
+ drivedefault = self._conn.query("SELECT * FROM " + "Msvm_ResourceAllocationSettingData " + "WHERE ResourceSubType LIKE " + "'%(res_sub_type)s' AND " + "InstanceID LIKE '%%Default%%'" + % locals())[0] + + #Clone the default and point it to the image file. + res = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData', + drivedefault) + #Set the new drive as the parent. + res.Parent = drive_path + res.Connection = [path] + + #Add the new vhd object as a virtual hard disk to the vm. + self._add_virt_resource(res, vm.path_()) + + def create_scsi_controller(self, vm_name): + """Create an iscsi controller ready to mount volumes.""" + + vm = self._lookup_vm_check(vm_name) + scsicontrldflt = self._conn.query("SELECT * FROM " + "Msvm_ResourceAllocationSettingData " + "WHERE ResourceSubType = 'Microsoft " + "Synthetic SCSI Controller' AND " + "InstanceID LIKE '%Default%'")[0] + if scsicontrldflt is None: + raise HyperVException(_('Controller not found')) + scsicontrl = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData', + scsicontrldflt) + scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] + scsiresource = self._add_virt_resource(scsicontrl, vm.path_()) + + def attach_volume_to_controller(self, vm_name, controller_path, address, + mounted_disk_path): + """Attach a volume to a controller.""" - def check_job_status(self, jobpath): - """Poll WMI job state for completion.""" - job_wmi_path = jobpath.replace('\\', '/') + vm = self._lookup_vm_check(vm_name) + + diskdflt = self._conn.query("SELECT * FROM " + "Msvm_ResourceAllocationSettingData " + "WHERE ResourceSubType LIKE " + "'Microsoft Physical Disk Drive' " + "AND InstanceID LIKE '%Default%'")[0] + diskdrive = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData', + diskdflt) + diskdrive.Address = address + diskdrive.Parent = controller_path + diskdrive.HostResource = [mounted_disk_path] + self._add_virt_resource(diskdrive, vm.path_()) + + def set_nic_connection(self, vm_name, nic_name, 
vswitch_port): + nic_data = self._get_nic_data_by_name(nic_name) + nic_data.Connection = [vswitch_port] + + vm = self._lookup_vm_check(vm_name) + self._modify_virt_resource(nic_data, vm.path_()) + + def _get_nic_data_by_name(self, name): + return self._conn.Msvm_SyntheticEthernetPortSettingData( + ElementName=name)[0] + + def create_nic(self, vm_name, nic_name, mac_address): + """Create a (synthetic) nic and attach it to the vm.""" + #Create a new nic + syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData() + default_nic_data = [n for n in syntheticnics_data + if n.InstanceID.rfind('Default') > 0] + new_nic_data = self._clone_wmi_obj( + 'Msvm_SyntheticEthernetPortSettingData', default_nic_data[0]) + + #Configure the nic + new_nic_data.ElementName = nic_name + new_nic_data.Address = mac_address.replace(':', '') + new_nic_data.StaticMacAddress = 'True' + new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] + + #Add the new nic to the vm + vm = self._lookup_vm_check(vm_name) + + self._add_virt_resource(new_nic_data, vm.path_()) + + def set_vm_state(self, vm_name, req_state): + """Set the desired state of the VM.""" + + vm = self._lookup_vm_check(vm_name) + (job_path, ret_val) = vm.RequestStateChange(req_state) + #Invalid state for current operation (32775) typically means that + #the VM is already in the state requested + self.check_ret_val(ret_val, job_path, [0, 32775]) + LOG.debug(_("Successfully changed vm state of %(vm_name)s" + " to %(req_state)s") % locals()) + + def destroy_vm(self, vm_name, destroy_disks=True): + """Destroy the VM. Also destroy the associated VHD disk files.""" + + vm = self._lookup_vm_check(vm_name) + + #Stop the VM first. 
+ self.set_vm_state(vm_name, constants.HYPERV_VM_STATE_DISABLED) + + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + disk_resources = [r for r in rasds + if r.ResourceSubType == + 'Microsoft Virtual Hard Disk'] + volume_resources = [r for r in rasds + if r.ResourceSubType == + 'Microsoft Physical Disk Drive'] + + #Collect volumes information before destroying the VM. + volumes_drives_list = [] + for volume_resource in volume_resources: + drive_path = volume_resource.HostResource[0] + #Appending the Msvm_Disk path + volumes_drives_list.append(drive_path) + + #Collect disk file information before destroying the VM. + disk_files = [] + for disk_resource in disk_resources: + disk_files.extend([c for c in disk_resource.Connection]) + + #Remove the VM. Does not destroy disks. + (job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) + self.check_ret_val(ret_val, job_path) + + if destroy_disks: + #Delete associated vhd disk files. 
+ for disk in disk_files: + LOG.debug(_("Deleting disk file: %(disk)s") % locals()) + self._delete_file(disk) + + return volumes_drives_list + + def _delete_file(self, path): + f = self._conn_cimv2.query("Select * from CIM_DataFile where " + "Name = '%s'" % path.replace("'", "''"))[0] + f.Delete() + + def check_ret_val(self, ret_val, job_path, success_values=[0]): + if ret_val == constants.WMI_JOB_STATUS_STARTED: + self._wait_for_job(job_path) + elif ret_val not in success_values: + raise HyperVException(_('Operation failed with return value: %s') + % ret_val) + + def _wait_for_job(self, job_path): + """Poll WMI job state and wait for completion.""" + + job_wmi_path = job_path.replace('\\', '/') job = wmi.WMI(moniker=job_wmi_path) while job.JobState == constants.WMI_JOB_STATE_RUNNING: @@ -69,54 +388,30 @@ class VMUtils(object): err_sum_desc = job.ErrorSummaryDescription err_desc = job.ErrorDescription err_code = job.ErrorCode - LOG.debug(_("WMI job failed with status %(job_state)d. " - "Error details: %(err_sum_desc)s - %(err_desc)s - " - "Error code: %(err_code)d") % locals()) + raise HyperVException(_("WMI job failed with status " + "%(job_state)d. Error details: " + "%(err_sum_desc)s - %(err_desc)s - " + "Error code: %(err_code)d") + % locals()) else: (error, ret_val) = job.GetError() if not ret_val and error: - LOG.debug(_("WMI job failed with status %(job_state)d. " - "Error details: %(error)s") % locals()) + raise HyperVException(_("WMI job failed with status " + "%(job_state)d. Error details: " + "%(error)s") % locals()) else: - LOG.debug(_("WMI job failed with status %(job_state)d. " - "No error description available") % locals()) - return False + raise HyperVException(_("WMI job failed with status " + "%(job_state)d. 
No error " + "description available") + % locals()) desc = job.Description elap = job.ElapsedTime LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s") - % locals()) - return True - - def get_instance_path(self, instance_name): - instance_path = os.path.join(CONF.instances_path, instance_name) - if not os.path.exists(instance_path): - LOG.debug(_('Creating folder %s '), instance_path) - os.makedirs(instance_path) - return instance_path - - def get_vhd_path(self, instance_name): - instance_path = self.get_instance_path(instance_name) - return os.path.join(instance_path, instance_name + ".vhd") - - def get_base_vhd_path(self, image_name): - base_dir = os.path.join(CONF.instances_path, '_base') - if not os.path.exists(base_dir): - os.makedirs(base_dir) - return os.path.join(base_dir, image_name + ".vhd") - - def make_export_path(self, instance_name): - export_folder = os.path.join(CONF.instances_path, "export", - instance_name) - if os.path.isdir(export_folder): - LOG.debug(_('Removing existing folder %s '), export_folder) - shutil.rmtree(export_folder) - LOG.debug(_('Creating folder %s '), export_folder) - os.makedirs(export_folder) - return export_folder - - def clone_wmi_obj(self, conn, wmi_class, wmi_obj): + % locals()) + + def _clone_wmi_obj(self, wmi_class, wmi_obj): """Clone a WMI object.""" - cl = conn.__getattr__(wmi_class) # get the class + cl = getattr(self._conn, wmi_class) # get the class newinst = cl.new() #Copy the properties from the original. 
for prop in wmi_obj._properties: @@ -125,51 +420,78 @@ class VMUtils(object): strguid.append(str(uuid.uuid4())) newinst.Properties_.Item(prop).Value = strguid else: - newinst.Properties_.Item(prop).Value = \ - wmi_obj.Properties_.Item(prop).Value + prop_value = wmi_obj.Properties_.Item(prop).Value + newinst.Properties_.Item(prop).Value = prop_value + return newinst - def add_virt_resource(self, conn, res_setting_data, target_vm): + def _add_virt_resource(self, res_setting_data, vm_path): """Adds a new resource to the VM.""" - vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0] - (job, new_resources, ret_val) = vs_man_svc.\ - AddVirtualSystemResources([res_setting_data.GetText_(1)], - target_vm.path_()) - success = True - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self.check_job_status(job) - else: - success = (ret_val == 0) - if success: - return new_resources - else: - return None + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + res_xml = [res_setting_data.GetText_(1)] + (job_path, + new_resources, + ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path) + self.check_ret_val(ret_val, job_path) + return new_resources - def modify_virt_resource(self, conn, res_setting_data, target_vm): + def _modify_virt_resource(self, res_setting_data, vm_path): """Updates a VM resource.""" - vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0] - (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources( ResourceSettingData=[res_setting_data.GetText_(1)], - ComputerSystem=target_vm.path_()) - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self.check_job_status(job) - else: - success = (ret_val == 0) - return success + ComputerSystem=vm_path) + self.check_ret_val(ret_val, job_path) - def remove_virt_resource(self, conn, res_setting_data, target_vm): + def 
_remove_virt_resource(self, res_setting_data, vm_path): """Removes a VM resource.""" - vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0] - (job, ret_val) = vs_man_svc.\ - RemoveVirtualSystemResources([res_setting_data.path_()], - target_vm.path_()) - success = True - if ret_val == constants.WMI_JOB_STATUS_STARTED: - success = self.check_job_status(job) - else: - success = (ret_val == 0) - return success + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + res_path = [res_setting_data.path_()] + (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path, + vm_path) + self.check_ret_val(ret_val, job_path) + + def take_vm_snapshot(self, vm_name): + vm = self._lookup_vm_check(vm_name) + + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + + (job_path, ret_val, + snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_()) + self.check_ret_val(ret_val, job_path) + + job_wmi_path = job_path.replace('\\', '/') + job = wmi.WMI(moniker=job_wmi_path) + snp_setting_data = job.associators( + wmi_result_class='Msvm_VirtualSystemSettingData')[0] + return snp_setting_data.path_() + + def remove_vm_snapshot(self, snapshot_path): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + + (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot( + snapshot_path) + self.check_ret_val(ret_val, job_path) + + def detach_vm_disk(self, vm_name, disk_path): + vm = self._lookup_vm_check(vm_name) + physical_disk = self._get_mounted_disk_resource_from_path( + disk_path) + self._remove_virt_resource(physical_disk, vm.path_()) + + def _get_mounted_disk_resource_from_path(self, disk_path): + physical_disks = self._conn.query("SELECT * FROM " + "Msvm_ResourceAllocationSettingData" + " WHERE ResourceSubType = " + "'Microsoft Physical Disk Drive'") + for physical_disk in physical_disks: + if physical_disk.HostResource: + if physical_disk.HostResource[0].lower() == disk_path.lower(): + return physical_disk - def 
fetch_image(self, target, context, image_id, user, project, - *args, **kwargs): - images.fetch(context, image_id, target, user, project) + def get_mounted_disk_by_drive_number(self, device_number): + mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive " + "WHERE DriveNumber=" + + str(device_number)) + if len(mounted_disks): + return mounted_disks[0].path_() diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py index b69cf7bf1..a7e56b739 100644 --- a/nova/virt/hyperv/volumeops.py +++ b/nova/virt/hyperv/volumeops.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Pedro Navarro Perez +# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -20,210 +21,140 @@ Management class for Storage-related functions (attach, detach, etc). """ import time -from nova import block_device from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova.virt import driver -from nova.virt.hyperv import baseops +from nova.virt.hyperv import hostutils from nova.virt.hyperv import vmutils from nova.virt.hyperv import volumeutils -from nova.virt.hyperv import volumeutilsV2 +from nova.virt.hyperv import volumeutilsv2 LOG = logging.getLogger(__name__) hyper_volumeops_opts = [ cfg.IntOpt('hyperv_attaching_volume_retry_count', - default=10, - help='The number of times we retry on attaching volume '), + default=10, + help='The number of times we retry on attaching volume '), cfg.IntOpt('hyperv_wait_between_attach_retry', - default=5, - help='The seconds to wait between a volume attachment attempt'), + default=5, + help='The seconds to wait between an volume ' + 'attachment attempt'), cfg.BoolOpt('force_volumeutils_v1', - default=False, - help='Force volumeutils v1'), - ] + default=False, + help='Force volumeutils v1'), +] CONF = cfg.CONF CONF.register_opts(hyper_volumeops_opts) CONF.import_opt('my_ip', 'nova.netconf') 
-class VolumeOps(baseops.BaseOps): +class VolumeOps(object): """ Management class for Volume-related tasks """ def __init__(self): - super(VolumeOps, self).__init__() - + self._hostutils = hostutils.HostUtils() self._vmutils = vmutils.VMUtils() - self._driver = driver - self._block_device = block_device - self._time = time + self._volutils = self._get_volume_utils() self._initiator = None self._default_root_device = 'vda' - self._attaching_volume_retry_count = \ - CONF.hyperv_attaching_volume_retry_count - self._wait_between_attach_retry = \ - CONF.hyperv_wait_between_attach_retry - self._volutils = self._get_volume_utils() def _get_volume_utils(self): - if(not CONF.force_volumeutils_v1) and \ - (self._get_hypervisor_version() >= 6.2): - return volumeutilsV2.VolumeUtilsV2( - self._conn_storage, self._conn_wmi) + if(not CONF.force_volumeutils_v1 and + self._hostutils.get_windows_version() >= 6.2): + return volumeutilsv2.VolumeUtilsV2() else: - return volumeutils.VolumeUtils(self._conn_wmi) - - def _get_hypervisor_version(self): - """Get hypervisor version. - :returns: hypervisor version (ex. 
12003) - """ - version = self._conn_cimv2.Win32_OperatingSystem()[0]\ - .Version - LOG.info(_('Windows version: %s ') % version) - return version + return volumeutils.VolumeUtils() def attach_boot_volume(self, block_device_info, vm_name): """Attach the boot volume to the IDE controller.""" + LOG.debug(_("block device info: %s"), block_device_info) - ebs_root = self._driver.block_device_info_get_mapping( + ebs_root = driver.block_device_info_get_mapping( block_device_info)[0] + connection_info = ebs_root['connection_info'] data = connection_info['data'] target_lun = data['target_lun'] target_iqn = data['target_iqn'] target_portal = data['target_portal'] self._volutils.login_storage_target(target_lun, target_iqn, - target_portal) + target_portal) try: #Getting the mounted disk - mounted_disk = self._get_mounted_disk_from_lun(target_iqn, - target_lun) - #Attach to IDE controller + mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, + target_lun) #Find the IDE controller for the vm. 
- vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) - vm = vms[0] - vmsettings = vm.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') - rasds = vmsettings[0].associators( - wmi_result_class='MSVM_ResourceAllocationSettingData') - ctrller = [r for r in rasds - if r.ResourceSubType == 'Microsoft Emulated IDE Controller' - and r.Address == "0"] + ctrller_path = self._vmutils.get_vm_ide_controller(vm_name, 0) #Attaching to the same slot as the VHD disk file - self._attach_volume_to_controller(ctrller, 0, mounted_disk, vm) + self._vmutils.attach_volume_to_controller(vm_name, + ctrller_path, 0, + mounted_disk_path) except Exception as exn: LOG.exception(_('Attach boot from volume failed: %s'), exn) self._volutils.logout_storage_target(target_iqn) raise vmutils.HyperVException( - _('Unable to attach boot volume to instance %s') - % vm_name) + _('Unable to attach boot volume to instance %s') % vm_name) def volume_in_mapping(self, mount_device, block_device_info): return self._volutils.volume_in_mapping(mount_device, - block_device_info) + block_device_info) - def attach_volume(self, connection_info, instance_name, mountpoint): + def attach_volume(self, connection_info, instance_name): """Attach a volume to the SCSI controller.""" - LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s," - " %(mountpoint)s") % locals()) + LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s") + % locals()) data = connection_info['data'] target_lun = data['target_lun'] target_iqn = data['target_iqn'] target_portal = data['target_portal'] self._volutils.login_storage_target(target_lun, target_iqn, - target_portal) + target_portal) try: #Getting the mounted disk - mounted_disk = self._get_mounted_disk_from_lun(target_iqn, - target_lun) + mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, + target_lun) #Find the SCSI controller for the vm - vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name) - vm = vms[0] - vmsettings 
= vm.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') - rasds = vmsettings[0].associators( - wmi_result_class='MSVM_ResourceAllocationSettingData') - ctrller = [r for r in rasds - if r.ResourceSubType == 'Microsoft Synthetic SCSI Controller'] - self._attach_volume_to_controller( - ctrller, self._get_free_controller_slot(ctrller[0]), - mounted_disk, vm) + ctrller_path = self._vmutils.get_vm_iscsi_controller(instance_name) + + slot = self._get_free_controller_slot(ctrller_path) + self._vmutils.attach_volume_to_controller(instance_name, + ctrller_path, + slot, + mounted_disk_path) except Exception as exn: LOG.exception(_('Attach volume failed: %s'), exn) self._volutils.logout_storage_target(target_iqn) - raise vmutils.HyperVException( - _('Unable to attach volume to instance %s') - % instance_name) + raise vmutils.HyperVException(_('Unable to attach volume ' + 'to instance %s') % instance_name) - def _attach_volume_to_controller(self, controller, address, mounted_disk, - instance): - """Attach a volume to a controller.""" - #Find the default disk drive object for the vm and clone it. 
- diskdflt = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\ - AND InstanceID LIKE '%Default%'")[0] - diskdrive = self._vmutils.clone_wmi_obj(self._conn, - 'Msvm_ResourceAllocationSettingData', diskdflt) - diskdrive.Address = address - diskdrive.Parent = controller[0].path_() - diskdrive.HostResource = [mounted_disk[0].path_()] - new_resources = self._vmutils.add_virt_resource(self._conn, diskdrive, - instance) - if new_resources is None: - raise vmutils.HyperVException(_('Failed to add volume to VM %s') % - instance) + def _get_free_controller_slot(self, scsi_controller_path): + #Slots starts from 0, so the lenght of the disks gives us the free slot + return self._vmutils.get_attached_disks_count(scsi_controller_path) - def _get_free_controller_slot(self, scsi_controller): - #Getting volumes mounted in the SCSI controller - volumes = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\ - AND Parent = '" + scsi_controller.path_() + "'") - #Slots starts from 0, so the length of the disks gives us the free slot - return len(volumes) - - def detach_volume(self, connection_info, instance_name, mountpoint): + def detach_volume(self, connection_info, instance_name): """Dettach a volume to the SCSI controller.""" - LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s," - " %(mountpoint)s") % locals()) + LOG.debug(_("Detach_volume: %(connection_info)s " + "from %(instance_name)s") % locals()) data = connection_info['data'] target_lun = data['target_lun'] target_iqn = data['target_iqn'] #Getting the mounted disk - mounted_disk = self._get_mounted_disk_from_lun(target_iqn, target_lun) - physical_list = self._conn.query( - "SELECT * FROM Msvm_ResourceAllocationSettingData \ - WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'") - physical_disk = 0 - for phydisk in physical_list: - 
host_resource_list = phydisk.HostResource - if host_resource_list is None: - continue - host_resource = str(host_resource_list[0].lower()) - mounted_disk_path = str(mounted_disk[0].path_().lower()) - LOG.debug(_("Mounted disk to detach is: %s"), mounted_disk_path) - LOG.debug(_("host_resource disk detached is: %s"), host_resource) - if host_resource == mounted_disk_path: - physical_disk = phydisk - LOG.debug(_("Physical disk detached is: %s"), physical_disk) - vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name) - vm = vms[0] - remove_result = self._vmutils.remove_virt_resource(self._conn, - physical_disk, vm) - if remove_result is False: - raise vmutils.HyperVException( - _('Failed to remove volume from VM %s') % - instance_name) + mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, + target_lun) + + LOG.debug(_("Detaching physical disk from instance: %s"), + mounted_disk_path) + self._vmutils.detach_vm_disk(instance_name, mounted_disk_path) + #Sending logout self._volutils.logout_storage_target(target_iqn) def get_volume_connector(self, instance): if not self._initiator: - self._initiator = self._get_iscsi_initiator() + self._initiator = self._volutils.get_iscsi_initiator() if not self._initiator: LOG.warn(_('Could not determine iscsi initiator name'), instance=instance) @@ -232,87 +163,35 @@ class VolumeOps(baseops.BaseOps): 'initiator': self._initiator, } - def _get_iscsi_initiator(self): - return self._volutils.get_iscsi_initiator(self._conn_cimv2) - def _get_mounted_disk_from_lun(self, target_iqn, target_lun): - initiator_session = self._conn_wmi.query( - "SELECT * FROM MSiSCSIInitiator_SessionClass \ - WHERE TargetName='" + target_iqn + "'")[0] - devices = initiator_session.Devices - device_number = None - for device in devices: - LOG.debug(_("device.InitiatorName: %s"), device.InitiatorName) - LOG.debug(_("device.TargetName: %s"), device.TargetName) - LOG.debug(_("device.ScsiPortNumber: %s"), device.ScsiPortNumber) - 
LOG.debug(_("device.ScsiPathId: %s"), device.ScsiPathId) - LOG.debug(_("device.ScsiTargetId): %s"), device.ScsiTargetId) - LOG.debug(_("device.ScsiLun: %s"), device.ScsiLun) - LOG.debug(_("device.DeviceInterfaceGuid :%s"), - device.DeviceInterfaceGuid) - LOG.debug(_("device.DeviceInterfaceName: %s"), - device.DeviceInterfaceName) - LOG.debug(_("device.LegacyName: %s"), device.LegacyName) - LOG.debug(_("device.DeviceType: %s"), device.DeviceType) - LOG.debug(_("device.DeviceNumber %s"), device.DeviceNumber) - LOG.debug(_("device.PartitionNumber :%s"), device.PartitionNumber) - scsi_lun = device.ScsiLun - if scsi_lun == target_lun: - device_number = device.DeviceNumber + device_number = self._volutils.get_device_number_for_target(target_iqn, + target_lun) if device_number is None: - raise vmutils.HyperVException( - _('Unable to find a mounted disk for' - ' target_iqn: %s') % target_iqn) - LOG.debug(_("Device number : %s"), device_number) - LOG.debug(_("Target lun : %s"), target_lun) + raise vmutils.HyperVException(_('Unable to find a mounted ' + 'disk for target_iqn: %s') + % target_iqn) + LOG.debug(_('Device number: %(device_number)s, ' + 'target lun: %(target_lun)s') % locals()) #Finding Mounted disk drive - for i in range(1, self._attaching_volume_retry_count): - mounted_disk = self._conn.query( - "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" + - str(device_number) + "") - LOG.debug(_("Mounted disk is: %s"), mounted_disk) - if len(mounted_disk) > 0: + for i in range(1, CONF.hyperv_attaching_volume_retry_count): + mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number( + device_number) + if mounted_disk_path: break - self._time.sleep(self._wait_between_attach_retry) - mounted_disk = self._conn.query( - "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" + - str(device_number) + "") - LOG.debug(_("Mounted disk is: %s"), mounted_disk) - if len(mounted_disk) == 0: - raise vmutils.HyperVException( - _('Unable to find a mounted disk for' - ' target_iqn: 
%s') % target_iqn) - return mounted_disk + time.sleep(CONF.hyperv_wait_between_attach_retry) + + if not mounted_disk_path: + raise vmutils.HyperVException(_('Unable to find a mounted disk ' + 'for target_iqn: %s') + % target_iqn) + return mounted_disk_path def disconnect_volume(self, physical_drive_path): #Get the session_id of the ISCSI connection - session_id = self._get_session_id_from_mounted_disk( + session_id = self._volutils.get_session_id_from_mounted_disk( physical_drive_path) #Logging out the target self._volutils.execute_log_out(session_id) - def _get_session_id_from_mounted_disk(self, physical_drive_path): - drive_number = self._get_drive_number_from_disk_path( - physical_drive_path) - LOG.debug(_("Drive number to disconnect is: %s"), drive_number) - initiator_sessions = self._conn_wmi.query( - "SELECT * FROM MSiSCSIInitiator_SessionClass") - for initiator_session in initiator_sessions: - devices = initiator_session.Devices - for device in devices: - deviceNumber = str(device.DeviceNumber) - LOG.debug(_("DeviceNumber : %s"), deviceNumber) - if deviceNumber == drive_number: - return initiator_session.SessionId - - def _get_drive_number_from_disk_path(self, disk_path): - LOG.debug(_("Disk path to parse: %s"), disk_path) - start_device_id = disk_path.find('"', disk_path.find('DeviceID')) - LOG.debug(_("start_device_id: %s"), start_device_id) - end_device_id = disk_path.find('"', start_device_id + 1) - LOG.debug(_("end_device_id: %s"), end_device_id) - deviceID = disk_path[start_device_id + 1:end_device_id] - return deviceID[deviceID.find("\\") + 2:] - def get_default_root_device(self): return self._default_root_device diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py index 051c37fd6..713ace258 100644 --- a/nova/virt/hyperv/volumeutils.py +++ b/nova/virt/hyperv/volumeutils.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 Pedro Navarro Perez +# Copyright 2013 Cloudbase Solutions Srl # All Rights 
Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -35,47 +36,48 @@ CONF = cfg.CONF class VolumeUtils(basevolumeutils.BaseVolumeUtils): - def __init__(self, conn_wmi): - self._conn_wmi = conn_wmi + def __init__(self): + super(VolumeUtils, self).__init__() - def execute(self, *args, **kwargs): - _PIPE = subprocess.PIPE # pylint: disable=E1101 - proc = subprocess.Popen( - [args], - stdin=_PIPE, - stdout=_PIPE, - stderr=_PIPE, - ) - stdout_value, stderr_value = proc.communicate() - if stdout_value.find('The operation completed successfully') == -1: - raise vmutils.HyperVException(_('An error has occurred when ' - 'calling the iscsi initiator: %s') % stdout_value) + def execute(self, *args, **kwargs): + _PIPE = subprocess.PIPE # pylint: disable=E1101 + proc = subprocess.Popen( + [args], + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + ) + stdout_value, stderr_value = proc.communicate() + if stdout_value.find('The operation completed successfully') == -1: + raise vmutils.HyperVException(_('An error has occurred when ' + 'calling the iscsi initiator: %s') + % stdout_value) - def login_storage_target(self, target_lun, target_iqn, target_portal): - """Add target portal, list targets and logins to the target.""" - separator = target_portal.find(':') - target_address = target_portal[:separator] - target_port = target_portal[separator + 1:] - #Adding target portal to iscsi initiator. Sending targets - self.execute('iscsicli.exe ' + 'AddTargetPortal ' + - target_address + ' ' + target_port + - ' * * * * * * * * * * * * *') - #Listing targets - self.execute('iscsicli.exe ' + 'LisTargets') - #Sending login - self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn) - #Waiting the disk to be mounted. 
Research this to avoid sleep - time.sleep(CONF.hyperv_wait_between_attach_retry) + def login_storage_target(self, target_lun, target_iqn, target_portal): + """Add target portal, list targets and logins to the target.""" + separator = target_portal.find(':') + target_address = target_portal[:separator] + target_port = target_portal[separator + 1:] + #Adding target portal to iscsi initiator. Sending targets + self.execute('iscsicli.exe ' + 'AddTargetPortal ' + + target_address + ' ' + target_port + + ' * * * * * * * * * * * * *') + #Listing targets + self.execute('iscsicli.exe ' + 'LisTargets') + #Sending login + self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn) + #Waiting the disk to be mounted. Research this to avoid sleep + time.sleep(CONF.hyperv_wait_between_attach_retry) - def logout_storage_target(self, target_iqn): - """Logs out storage target through its session id.""" + def logout_storage_target(self, target_iqn): + """Logs out storage target through its session id.""" - sessions = self._conn_wmi.query( - "SELECT * FROM MSiSCSIInitiator_SessionClass \ - WHERE TargetName='" + target_iqn + "'") - for session in sessions: - self.execute_log_out(session.SessionId) + sessions = self._conn_wmi.query("SELECT * FROM " + "MSiSCSIInitiator_SessionClass " + "WHERE TargetName='%s'" % target_iqn) + for session in sessions: + self.execute_log_out(session.SessionId) - def execute_log_out(self, session_id): - """Executes log out of the session described by its session ID.""" - self.execute('iscsicli.exe ' + 'logouttarget ' + session_id) + def execute_log_out(self, session_id): + """Executes log out of the session described by its session ID.""" + self.execute('iscsicli.exe ' + 'logouttarget ' + session_id) diff --git a/nova/virt/hyperv/volumeutilsV2.py b/nova/virt/hyperv/volumeutilsV2.py deleted file mode 100644 index 6f5bcdac9..000000000 --- a/nova/virt/hyperv/volumeutilsV2.py +++ /dev/null @@ -1,70 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# 
Copyright 2012 Pedro Navarro Perez -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helper methods for operations related to the management of volumes, -and storage repositories for Windows 2012 -""" -import time - -from nova.openstack.common import cfg -from nova.openstack.common import log as logging -from nova.virt.hyperv import basevolumeutils - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils): - - def __init__(self, conn_storage, conn_wmi): - self._conn_storage = conn_storage - self._conn_wmi = conn_wmi - - def login_storage_target(self, target_lun, target_iqn, - target_portal): - """Add target portal, list targets and logins to the target.""" - separator = target_portal.find(':') - target_address = target_portal[:separator] - target_port = target_portal[separator + 1:] - #Adding target portal to iscsi initiator. Sending targets - portal = self._conn_storage.__getattr__("MSFT_iSCSITargetPortal") - portal.New(TargetPortalAddress=target_address, - TargetPortalPortNumber=target_port) - #Connecting to the target - target = self._conn_storage.__getattr__("MSFT_iSCSITarget") - target.Connect(NodeAddress=target_iqn, - IsPersistent=True) - #Waiting the disk to be mounted. 
Research this - time.sleep(CONF.hyperv_wait_between_attach_retry) - - def logout_storage_target(self, target_iqn): - """Logs out storage target through its session id.""" - - target = self._conn_storage.MSFT_iSCSITarget( - NodeAddress=target_iqn)[0] - if target.IsConnected: - session = self._conn_storage.MSFT_iSCSISession( - TargetNodeAddress=target_iqn)[0] - if session.IsPersistent: - session.Unregister() - target.Disconnect() - - def execute_log_out(self, session_id): - session = self._conn_wmi.MSiSCSIInitiator_SessionClass( - SessionId=session_id)[0] - self.logout_storage_target(session.TargetName) diff --git a/nova/virt/hyperv/volumeutilsv2.py b/nova/virt/hyperv/volumeutilsv2.py new file mode 100644 index 000000000..8322d31d3 --- /dev/null +++ b/nova/virt/hyperv/volumeutilsv2.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2012 Pedro Navarro Perez +# Copyright 2013 Cloudbase Solutions Srl +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Helper methods for operations related to the management of volumes +and storage repositories on Windows Server 2012 and above +""" +import sys +import time + +if sys.platform == 'win32': + import wmi + +from nova.openstack.common import cfg +from nova.openstack.common import log as logging +from nova.virt.hyperv import basevolumeutils + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils): + def __init__(self): + super(VolumeUtilsV2, self).__init__() + + storage_namespace = '//./root/microsoft/windows/storage' + if sys.platform == 'win32': + self._conn_storage = wmi.WMI(moniker=storage_namespace) + + def login_storage_target(self, target_lun, target_iqn, target_portal): + """Add target portal, list targets and logins to the target.""" + separator = target_portal.find(':') + target_address = target_portal[:separator] + target_port = target_portal[separator + 1:] + #Adding target portal to iscsi initiator. Sending targets + portal = self._conn_storage.MSFT_iSCSITargetPortal + portal.New(TargetPortalAddress=target_address, + TargetPortalPortNumber=target_port) + #Connecting to the target + target = self._conn_storage.MSFT_iSCSITarget + target.Connect(NodeAddress=target_iqn, + IsPersistent=True) + #Waiting the disk to be mounted. Research this + time.sleep(CONF.hyperv_wait_between_attach_retry) + + def logout_storage_target(self, target_iqn): + """Logs out storage target through its session id.""" + + target = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)[0] + if target.IsConnected: + session = self._conn_storage.MSFT_iSCSISession( + TargetNodeAddress=target_iqn)[0] + if session.IsPersistent: + session.Unregister() + target.Disconnect() + + def execute_log_out(self, session_id): + session = self._conn_wmi.MSiSCSIInitiator_SessionClass( + SessionId=session_id)[0] + self.logout_storage_target(session.TargetName) -- cgit