summaryrefslogtreecommitdiffstats
path: root/nova/virt
diff options
context:
space:
mode:
authorAlessandro Pilotti <ap@pilotti.it>2012-08-13 17:24:16 +0300
committerAlessandro Pilotti <ap@pilotti.it>2012-08-16 03:38:51 +0300
commitbae2632658bce8d09a8b5f777e8a3d1b6c960467 (patch)
tree80be7baef251d9eb491b1302c3e52a771886014c /nova/virt
parent943bbcb6de5b1836ab7b9ef88adf525212c9ffa6 (diff)
Adds Hyper-V support in nova-compute (with new network_info model), including unit tests
blueprint hyper-v-revival Features included in this commit: Spawn (including CoW image option support) Destroy Info List Pause Unpause Suspend Resume Reboot Power On Power Off Snapshot Volume Attach Volume Detach Boot from Volume Live Migration Supported platforms: Windows Server / Hyper-V Server 2008 R2 Windows Server / Hyper-V Server 2012 Unit tests: Unit tests for all the listed features are included. Tests can be executed on Linux as well. nova.conf relevant flags: Compute driver: compute_driver=nova.virt.hyperv.driver.HyperVDriver External vswitch to be used: vswitch_name=an_external_vswitch Path where the VHDs are going to be stored instances_path=C:\Hyper-V\instances Live migration support for hosts with heterogeneous CPUs limit_cpu_features=true Change-Id: Ic40adcd2d78b0ca6792d77940810f5a44de8cc37
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/hyperv/README.rst44
-rw-r--r--nova/virt/hyperv/__init__.py0
-rw-r--r--nova/virt/hyperv/baseops.py61
-rw-r--r--nova/virt/hyperv/constants.py54
-rw-r--r--nova/virt/hyperv/driver.py226
-rw-r--r--nova/virt/hyperv/ioutils.py26
-rw-r--r--nova/virt/hyperv/livemigrationops.py162
-rw-r--r--nova/virt/hyperv/snapshotops.py187
-rw-r--r--nova/virt/hyperv/vmops.py650
-rw-r--r--nova/virt/hyperv/vmutils.py146
-rw-r--r--nova/virt/hyperv/volumeops.py297
-rw-r--r--nova/virt/hyperv/volumeutils.py122
12 files changed, 1975 insertions, 0 deletions
diff --git a/nova/virt/hyperv/README.rst b/nova/virt/hyperv/README.rst
new file mode 100644
index 000000000..c0609f310
--- /dev/null
+++ b/nova/virt/hyperv/README.rst
@@ -0,0 +1,44 @@
+Hyper-V Volumes Management
+=============================================
+
+To enable the volume features, the first thing that needs to be done is to
+enable the iSCSI service on the Windows compute nodes and set it to start
+automatically.
+
+sc config msiscsi start= auto
+net start msiscsi
+
+In Windows Server 2012, it's important to execute the following commands to
+prevent the volumes from being brought online by default:
+
+diskpart
+san policy=OfflineAll
+exit
+
+How to check if your iSCSI configuration is working properly:
+
+On your OpenStack controller:
+
+1. Create a volume with e.g. "nova volume-create 1" and note the generated
+volume id
+
+On Windows:
+
+2. iscsicli QAddTargetPortal <your_iSCSI_target>
+3. iscsicli ListTargets
+
+The output should contain the iqn related to your volume:
+iqn.2010-10.org.openstack:volume-<volume_id>
+
+How to test Boot from volume in Hyper-V from the OpenStack dashboard:
+
+1. First of all, create a volume
+2. Get the volume ID of the created volume
+3. Upload and untar the following VHD image to the cloud controller:
+http://dev.opennebula.org/attachments/download/482/ttylinux.vhd.gz
+4. sudo dd if=/path/to/vhdfileofstep3
+of=/dev/nova-volumes/volume-XXXXX <- Related to the ID of step 2
+5. Launch an instance from any image (this is not important because we are
+just booting from a volume) from the dashboard, and don't forget to select
+boot from volume and select the volume created in step 2. Important: Device
+name must be "vda".
diff --git a/nova/virt/hyperv/__init__.py b/nova/virt/hyperv/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/virt/hyperv/__init__.py
diff --git a/nova/virt/hyperv/baseops.py b/nova/virt/hyperv/baseops.py
new file mode 100644
index 000000000..3d941a854
--- /dev/null
+++ b/nova/virt/hyperv/baseops.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management base class for Hyper-V operations.
+"""
+import sys
+
+from nova.openstack.common import log as logging
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseOps(object):
+ def __init__(self):
+ self.__conn = None
+ self.__conn_v2 = None
+ self.__conn_cimv2 = None
+ self.__conn_wmi = None
+
+ @property
+ def _conn(self):
+ if self.__conn is None:
+ self.__conn = wmi.WMI(moniker='//./root/virtualization')
+ return self.__conn
+
+ @property
+ def _conn_v2(self):
+ if self.__conn_v2 is None:
+ self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2')
+ return self.__conn_v2
+
+ @property
+ def _conn_cimv2(self):
+ if self.__conn_cimv2 is None:
+ self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+ return self.__conn_cimv2
+
+ @property
+ def _conn_wmi(self):
+ if self.__conn_wmi is None:
+ self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
+ return self.__conn_wmi
diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py
new file mode 100644
index 000000000..392dcfa13
--- /dev/null
+++ b/nova/virt/hyperv/constants.py
@@ -0,0 +1,54 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Constants used in ops classes
+"""
+
+from nova.compute import power_state
+
+HYPERV_VM_STATE_ENABLED = 2
+HYPERV_VM_STATE_DISABLED = 3
+HYPERV_VM_STATE_REBOOT = 10
+HYPERV_VM_STATE_RESET = 11
+HYPERV_VM_STATE_PAUSED = 32768
+HYPERV_VM_STATE_SUSPENDED = 32769
+
+HYPERV_POWER_STATE = {
+ HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
+ HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
+ HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
+ HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
+}
+
+REQ_POWER_STATE = {
+ 'Enabled': HYPERV_VM_STATE_ENABLED,
+ 'Disabled': HYPERV_VM_STATE_DISABLED,
+ 'Reboot': HYPERV_VM_STATE_REBOOT,
+ 'Reset': HYPERV_VM_STATE_RESET,
+ 'Paused': HYPERV_VM_STATE_PAUSED,
+ 'Suspended': HYPERV_VM_STATE_SUSPENDED,
+}
+
+WMI_JOB_STATUS_STARTED = 4096
+WMI_JOB_STATE_RUNNING = 4
+WMI_JOB_STATE_COMPLETED = 7
+
+VM_SUMMARY_NUM_PROCS = 4
+VM_SUMMARY_ENABLED_STATE = 100
+VM_SUMMARY_MEMORY_USAGE = 103
+VM_SUMMARY_UPTIME = 105
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
new file mode 100644
index 000000000..0a29c9426
--- /dev/null
+++ b/nova/virt/hyperv/driver.py
@@ -0,0 +1,226 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Cloud.com, Inc
+# Copyright (c) 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to Hyper-V .
+Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V
+Hyper-V WMI usage:
+ http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx
+The Hyper-V object model briefly:
+ The physical computer and its hosted virtual machines are each represented
+ by the Msvm_ComputerSystem class.
+
+ Each virtual machine is associated with a
+ Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more
+ Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting
+ there is a series of Msvm_ResourceAllocationSettingData (rasd) objects.
+ The rasd objects describe the settings for each device in a VM.
+ Together, the vs_gs_data, vmsettings and rasds describe the configuration
+ of the virtual machine.
+
+ Creating new resources such as disks and nics involves cloning a default
+ rasd object and appropriately modifying the clone and calling the
+ AddVirtualSystemResources WMI method
+ Changing resources such as memory uses the ModifyVirtualSystemResources
+ WMI method
+
+Using the Python WMI library:
+ Tutorial:
+ http://timgolden.me.uk/python/wmi/tutorial.html
+ Hyper-V WMI objects can be retrieved simply by using the class name
+ of the WMI object and optionally specifying a column to filter the
+ result set. More complex filters can be formed using WQL (sql-like)
+ queries.
+ The parameters and return tuples of WMI method calls can gleaned by
+ examining the doc string. For example:
+ >>> vs_man_svc.ModifyVirtualSystemResources.__doc__
+ ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[])
+ => (Job, ReturnValue)'
+ When passing setting data (ResourceSettingData) to the WMI method,
+ an XML representation of the data is passed in using GetText_(1).
+ Available methods on a service can be determined using method.keys():
+ >>> vs_man_svc.methods.keys()
+ vmsettings and rasds for a vm can be retrieved using the 'associators'
+ method with the appropriate return class.
+ Long running WMI commands generally return a Job (an instance of
+ Msvm_ConcreteJob) whose state can be polled to determine when it finishes
+
+"""
+
+from nova.openstack.common import log as logging
+from nova.virt import driver
+from nova.virt.hyperv import livemigrationops
+from nova.virt.hyperv import snapshotops
+from nova.virt.hyperv import vmops
+from nova.virt.hyperv import volumeops
+
+LOG = logging.getLogger(__name__)
+
+
+class HyperVDriver(driver.ComputeDriver):
+ def __init__(self):
+ super(HyperVDriver, self).__init__()
+
+ self._volumeops = volumeops.VolumeOps()
+ self._vmops = vmops.VMOps(self._volumeops)
+ self._snapshotops = snapshotops.SnapshotOps()
+ self._livemigrationops = livemigrationops.LiveMigrationOps(
+ self._volumeops)
+
+ def init_host(self, host):
+ self._host = host
+
+ def list_instances(self):
+ return self._vmops.list_instances()
+
+ def list_instances_detail(self):
+ return self._vmops.list_instances_detail()
+
+ def spawn(self, context, instance, image_meta, network_info,
+ block_device_info=None):
+ self._vmops.spawn(context, instance, image_meta, network_info,
+ block_device_info)
+
+ def reboot(self, instance, network_info, reboot_type):
+ self._vmops.reboot(instance, network_info, reboot_type)
+
+ def destroy(self, instance, network_info=None, cleanup=True):
+ self._vmops.destroy(instance, network_info, cleanup)
+
+ def get_info(self, instance):
+ return self._vmops.get_info(instance)
+
+ def attach_volume(self, connection_info, instance_name, mountpoint):
+ """Attach volume storage to VM instance"""
+ return self._volumeops.attach_volume(connection_info,
+ instance_name,
+ mountpoint)
+
+ def detach_volume(self, connection_info, instance_name, mountpoint):
+ """Detach volume storage to VM instance"""
+ return self._volumeops.detach_volume(connection_info,
+ instance_name,
+ mountpoint)
+
+ def get_volume_connector(self, instance):
+ return self._volumeops.get_volume_connector(instance)
+
+ def poll_rescued_instances(self, timeout):
+ pass
+
+ def update_available_resource(self, context, host):
+ self._vmops.update_available_resource(context, host)
+
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
+ pass
+
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
+ return {}
+
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
+
+ def snapshot(self, context, instance, name):
+ self._snapshotops.snapshot(context, instance, name)
+
+ def pause(self, instance):
+ self._vmops.pause(instance)
+
+ def unpause(self, instance):
+ self._vmops.unpause(instance)
+
+ def suspend(self, instance):
+ self._vmops.suspend(instance)
+
+ def resume(self, instance):
+ self._vmops.resume(instance)
+
+ def power_off(self, instance):
+ self._vmops.power_off(instance)
+
+ def power_on(self, instance):
+ self._vmops.power_on(instance)
+
+ def live_migration(self, context, instance_ref, dest, post_method,
+ recover_method, block_migration=False):
+ self._livemigrationops.live_migration(context, instance_ref, dest,
+ post_method, recover_method, block_migration)
+
+ def compare_cpu(self, cpu_info):
+ return self._livemigrationops.compare_cpu(cpu_info)
+
+ def pre_live_migration(self, context, instance, block_device_info,
+ network_info):
+ self._livemigrationops.pre_live_migration(context, instance,
+ block_device_info, network_info)
+
+ def post_live_migration_at_destination(self, ctxt, instance_ref,
+ network_info, block_migration):
+ self._livemigrationops.post_live_migration_at_destination(ctxt,
+ instance_ref, network_info, block_migration)
+
+ def check_can_live_migrate_destination(self, ctxt, instance,
+ block_migration, disk_over_commit):
+ pass
+
+ def check_can_live_migrate_destination_cleanup(self, ctxt,
+ dest_check_data):
+ pass
+
+ def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
+ pass
+
+ def plug_vifs(self, instance, network_info):
+ LOG.debug(_("plug_vifs called"), instance=instance)
+
+ def unplug_vifs(self, instance, network_info):
+ LOG.debug(_("plug_vifs called"), instance=instance)
+
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
+ LOG.debug(_("ensure_filtering_rules_for_instance called"),
+ instance=instance_ref)
+
+ def unfilter_instance(self, instance, network_info):
+ """Stop filtering instance"""
+ LOG.debug(_("unfilter_instance called"), instance=instance)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM"""
+ LOG.debug(_("confirm_migration called"), instance=instance)
+
+ def finish_revert_migration(self, instance, network_info):
+ """Finish reverting a resize, powering back on the instance"""
+ LOG.debug(_("finish_revert_migration called"), instance=instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False):
+ """Completes a resize, turning on the migrated instance"""
+ LOG.debug(_("finish_migration called"), instance=instance)
+
+ def get_console_output(self, instance):
+ LOG.debug(_("get_console_output called"), instance=instance)
+ return ''
+
+ def legacy_nwinfo(self):
+ return False
diff --git a/nova/virt/hyperv/ioutils.py b/nova/virt/hyperv/ioutils.py
new file mode 100644
index 000000000..d927e317f
--- /dev/null
+++ b/nova/virt/hyperv/ioutils.py
@@ -0,0 +1,26 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility class to ease the task of creating stubs of built in IO functions.
+"""
+
+import __builtin__
+
+
+def open(name, mode):
+ return __builtin__.open(name, mode)
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
new file mode 100644
index 000000000..1f97adf24
--- /dev/null
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -0,0 +1,162 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for live migration VM operations.
+"""
+import os
+import sys
+
+from nova import exception
+from nova import flags
+from nova.openstack.common import excutils
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vmutils
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+LOG = logging.getLogger(__name__)
+FLAGS = flags.FLAGS
+
+
+class LiveMigrationOps(baseops.BaseOps):
+ def __init__(self, volumeops):
+ super(LiveMigrationOps, self).__init__()
+
+ self._vmutils = vmutils.VMUtils()
+ self._volumeops = volumeops
+
+ def _check_live_migration_config(self):
+ try:
+ self._conn_v2
+ except Exception:
+ raise vmutils.HyperVException(
+ _('Live migration is not supported " \
+ "by this version of Hyper-V'))
+
+ migration_svc = self._conn_v2.Msvm_VirtualSystemMigrationService()[0]
+ vsmssd = migration_svc.associators(
+ wmi_association_class='Msvm_ElementSettingData',
+ wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')[0]
+ if not vsmssd.EnableVirtualSystemMigration:
+ raise vmutils.HyperVException(
+ _('Live migration is not enabled on this host'))
+ if not migration_svc.MigrationServiceListenerIPAddressList:
+ raise vmutils.HyperVException(
+ _('Live migration networks are not configured on this host'))
+
+ def live_migration(self, context, instance_ref, dest, post_method,
+ recover_method, block_migration=False):
+ LOG.debug(_("live_migration called"), instance=instance_ref)
+ instance_name = instance_ref["name"]
+
+ try:
+ self._check_live_migration_config()
+
+ vm_name = self._vmutils.lookup(self._conn, instance_name)
+ if vm_name is None:
+ raise exception.InstanceNotFound(instance=instance_name)
+ vm = self._conn_v2.Msvm_ComputerSystem(
+ ElementName=instance_name)[0]
+ vm_settings = vm.associators(
+ wmi_association_class='Msvm_SettingsDefineState',
+ wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+
+ new_resource_setting_data = []
+ sasds = vm_settings.associators(
+ wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
+ wmi_result_class='Msvm_StorageAllocationSettingData')
+ for sasd in sasds:
+ if sasd.ResourceType == 31 and \
+ sasd.ResourceSubType == \
+ "Microsoft:Hyper-V:Virtual Hard Disk":
+ #sasd.PoolId = ""
+ new_resource_setting_data.append(sasd.GetText_(1))
+
+ LOG.debug(_("Getting live migration networks for remote "
+ "host: %s"), dest)
+ _conn_v2_remote = wmi.WMI(
+ moniker='//' + dest + '/root/virtualization/v2')
+ migration_svc_remote = \
+ _conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
+ remote_ip_address_list = \
+ migration_svc_remote.MigrationServiceListenerIPAddressList
+
+ # VirtualSystemAndStorage
+ vsmsd = self._conn_v2.query("select * from "
+ "Msvm_VirtualSystemMigrationSettingData "
+ "where MigrationType = 32771")[0]
+ vsmsd.DestinationIPAddressList = remote_ip_address_list
+ migration_setting_data = vsmsd.GetText_(1)
+
+ migration_svc =\
+ self._conn_v2.Msvm_VirtualSystemMigrationService()[0]
+
+ LOG.debug(_("Starting live migration for instance: %s"),
+ instance_name)
+ (job_path, ret_val) = migration_svc.MigrateVirtualSystemToHost(
+ ComputerSystem=vm.path_(),
+ DestinationHost=dest,
+ MigrationSettingData=migration_setting_data,
+ NewResourceSettingData=new_resource_setting_data)
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._vmutils.check_job_status(job_path)
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise vmutils.HyperVException(
+ _('Failed to live migrate VM %s') % instance_name)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.debug(_("Calling live migration recover_method "
+ "for instance: %s"), instance_name)
+ recover_method(context, instance_ref, dest, block_migration)
+
+ LOG.debug(_("Calling live migration post_method for instance: %s"),
+ instance_name)
+ post_method(context, instance_ref, dest, block_migration)
+
+ def pre_live_migration(self, context, instance, block_device_info,
+ network_info):
+ LOG.debug(_("pre_live_migration called"), instance=instance)
+ self._check_live_migration_config()
+
+ if FLAGS.use_cow_images:
+ ebs_root = self._volumeops.volume_in_mapping(
+ self._volumeops.get_default_root_device(),
+ block_device_info)
+ if not ebs_root:
+ base_vhd_path = self._vmutils.get_base_vhd_path(
+ instance["image_ref"])
+ if not os.path.exists(base_vhd_path):
+ self._vmutils.fetch_image(base_vhd_path, context,
+ instance["image_ref"],
+ instance["user_id"],
+ instance["project_id"])
+
+ def post_live_migration_at_destination(self, ctxt, instance_ref,
+ network_info, block_migration):
+ LOG.debug(_("post_live_migration_at_destination called"),
+ instance=instance_ref)
+
+ def compare_cpu(self, cpu_info):
+ LOG.debug(_("compare_cpu called %s"), cpu_info)
+ return True
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
new file mode 100644
index 000000000..5e4676a4a
--- /dev/null
+++ b/nova/virt/hyperv/snapshotops.py
@@ -0,0 +1,187 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for VM snapshot operations.
+"""
+import os
+import shutil
+import sys
+
+from nova import exception
+from nova import flags
+from nova.image import glance
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import ioutils
+from nova.virt.hyperv import vmutils
+from xml.etree import ElementTree
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
+
+
+class SnapshotOps(baseops.BaseOps):
+ def __init__(self):
+ super(SnapshotOps, self).__init__()
+ self._vmutils = vmutils.VMUtils()
+
+ def snapshot(self, context, instance, name):
+ """Create snapshot from a running VM instance."""
+ instance_name = instance["name"]
+ vm = self._vmutils.lookup(self._conn, instance_name)
+ if vm is None:
+ raise exception.InstanceNotFound(instance=instance_name)
+ vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+
+ LOG.debug(_("Creating snapshot for instance %s"), instance_name)
+ (job_path, ret_val, snap_setting_data) = \
+ vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._vmutils.check_job_status(job_path)
+ if success:
+ job_wmi_path = job_path.replace('\\', '/')
+ job = wmi.WMI(moniker=job_wmi_path)
+ snap_setting_data = job.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise vmutils.HyperVException(
+ _('Failed to create snapshot for VM %s') %
+ instance_name)
+
+ export_folder = None
+ f = None
+
+ try:
+ src_vhd_path = os.path.join(FLAGS.instances_path, instance_name,
+ instance_name + ".vhd")
+
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+
+ LOG.debug(_("Getting info for VHD %s"), src_vhd_path)
+ (src_vhd_info, job_path, ret_val) = \
+ image_man_svc.GetVirtualHardDiskInfo(src_vhd_path)
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._vmutils.check_job_status(job_path)
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise vmutils.HyperVException(
+ _("Failed to get info for disk %s") %
+ (src_vhd_path))
+
+ src_base_disk_path = None
+ et = ElementTree.fromstring(src_vhd_info)
+ for item in et.findall("PROPERTY"):
+ if item.attrib["NAME"] == "ParentPath":
+ src_base_disk_path = item.find("VALUE").text
+ break
+
+ export_folder = self._vmutils.make_export_path(instance_name)
+
+ dest_vhd_path = os.path.join(export_folder, os.path.basename(
+ src_vhd_path))
+ LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),
+ locals())
+ shutil.copyfile(src_vhd_path, dest_vhd_path)
+
+ image_vhd_path = None
+ if not src_base_disk_path:
+ image_vhd_path = dest_vhd_path
+ else:
+ dest_base_disk_path = os.path.join(export_folder,
+ os.path.basename(src_base_disk_path))
+ LOG.debug(_('Copying base disk %(src_vhd_path)s to '
+ '%(dest_base_disk_path)s'), locals())
+ shutil.copyfile(src_base_disk_path, dest_base_disk_path)
+
+ LOG.debug(_("Reconnecting copied base VHD "
+ "%(dest_base_disk_path)s and diff VHD %(dest_vhd_path)s"),
+ locals())
+ (job_path, ret_val) = \
+ image_man_svc.ReconnectParentVirtualHardDisk(
+ ChildPath=dest_vhd_path,
+ ParentPath=dest_base_disk_path,
+ Force=True)
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._vmutils.check_job_status(job_path)
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise vmutils.HyperVException(
+ _("Failed to reconnect base disk "
+ "%(dest_base_disk_path)s and diff disk "
+ "%(dest_vhd_path)s") %
+ locals())
+
+ LOG.debug(_("Merging base disk %(dest_base_disk_path)s and "
+ "diff disk %(dest_vhd_path)s"),
+ locals())
+ (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(
+ SourcePath=dest_vhd_path,
+ DestinationPath=dest_base_disk_path)
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._vmutils.check_job_status(job_path)
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise vmutils.HyperVException(
+ _("Failed to merge base disk %(dest_base_disk_path)s "
+ "and diff disk %(dest_vhd_path)s") %
+ locals())
+ image_vhd_path = dest_base_disk_path
+
+ (glance_image_service, image_id) = \
+ glance.get_remote_image_service(context, name)
+ image_metadata = {"is_public": False,
+ "disk_format": "vhd",
+ "container_format": "bare",
+ "properties": {}}
+ f = ioutils.open(image_vhd_path, 'rb')
+ LOG.debug(
+ _("Updating Glance image %(image_id)s with content from "
+ "merged disk %(image_vhd_path)s"),
+ locals())
+ glance_image_service.update(context, image_id, image_metadata, f)
+
+ LOG.debug(_("Snapshot image %(image_id)s updated for VM "
+ "%(instance_name)s"), locals())
+ finally:
+ LOG.debug(_("Removing snapshot %s"), name)
+ (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
+ snap_setting_data.path_())
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._vmutils.check_job_status(job_path)
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise vmutils.HyperVException(
+ _('Failed to remove snapshot for VM %s') %
+ instance_name)
+ if f:
+ f.close()
+ if export_folder:
+ LOG.debug(_('Removing folder %s '), export_folder)
+ shutil.rmtree(export_folder)
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
new file mode 100644
index 000000000..94cb7477e
--- /dev/null
+++ b/nova/virt/hyperv/vmops.py
@@ -0,0 +1,650 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for basic VM operations.
+"""
+import multiprocessing
+import os
+import uuid
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt import driver
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vmutils
+
+LOG = logging.getLogger(__name__)
+
# Hyper-V driver configuration options, registered on the global FLAGS
# object below.
hyperv_opts = [
    # External vswitch instance NICs are bound to; when unset, the first
    # external vswitch found on the host is used (see
    # VMOps._find_external_network).
    cfg.StrOpt('vswitch_name',
               default=None,
               help='Default vSwitch Name, '
                    'if none provided first external is used'),
    # When True, processor features are limited so that instances can be
    # live-migrated between hosts with different CPUs (applied in
    # VMOps._create_vm).
    cfg.BoolOpt('limit_cpu_features',
                default=False,
                help='required for live migration among '
                     'hosts with different CPU features')
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(hyperv_opts)
+
+
class VMOps(baseops.BaseOps):
    """Management class for basic VM operations.

    Implements create/destroy, power state changes and host resource
    reporting through the Hyper-V WMI API (connections provided by
    baseops.BaseOps: self._conn and self._conn_cimv2).
    """

    def __init__(self, volumeops):
        super(VMOps, self).__init__()

        self._vmutils = vmutils.VMUtils()
        self._volumeops = volumeops

    def list_instances(self):
        """Return the names of all the instances known to Hyper-V."""
        vms = [v.ElementName
               for v in self._conn.Msvm_ComputerSystem(['ElementName'],
                                                       Caption="Virtual Machine")]
        return vms

    def list_instances_detail(self):
        """Return a driver.InstanceInfo (name, state) for every VM."""
        instance_infos = []
        for instance_name in self.list_instances():
            info = self._get_info(instance_name)
            instance_info = driver.InstanceInfo(
                instance_name, int(info['state']))
            instance_infos.append(instance_info)
        return instance_infos

    def get_info(self, instance):
        """Get information about the VM."""
        LOG.debug(_("get_info called for instance"), instance=instance)
        instance_name = instance["name"]
        return self._get_info(instance_name)

    def _get_info(self, instance_name):
        """Return a dict with state, memory and cpu info for the VM.

        Raises InstanceNotFound when no VM with that name exists.
        """
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance=instance_name)
        vm = self._conn.Msvm_ComputerSystem(
            ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vmsettings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')
        settings_paths = [v.path_() for v in vmsettings]
        #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
        summary_info = vs_man_svc.GetSummaryInformation(
            [constants.VM_SUMMARY_NUM_PROCS,
             constants.VM_SUMMARY_ENABLED_STATE,
             constants.VM_SUMMARY_MEMORY_USAGE,
             constants.VM_SUMMARY_UPTIME],
            settings_paths)[1]
        info = summary_info[0]

        LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
        state = str(constants.HYPERV_POWER_STATE[info.EnabledState])
        memusage = str(info.MemoryUsage)
        numprocs = str(info.NumberOfProcessors)
        uptime = str(info.UpTime)

        LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)s,"
                    " mem=%(memusage)s, num_cpu=%(numprocs)s,"
                    " uptime=%(uptime)s"), locals())

        return {'state': state,
                'max_mem': info.MemoryUsage,
                'mem': info.MemoryUsage,
                'num_cpu': info.NumberOfProcessors,
                'cpu_time': info.UpTime}

    def spawn(self, context, instance, image_meta, network_info,
              block_device_info=None):
        """Create a new VM and start it."""
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is not None:
            raise exception.InstanceExists(name=instance_name)

        ebs_root = self._volumeops.volume_in_mapping(
            self._volumeops.get_default_root_device(),
            block_device_info)

        #If is not a boot from volume spawn
        if not ebs_root:
            #Fetch the file, assume it is a VHD file.
            vhdfile = self._vmutils.get_vhd_path(instance_name)
            try:
                self._cache_image(fn=self._vmutils.fetch_image,
                                  context=context,
                                  target=vhdfile,
                                  fname=instance['image_ref'],
                                  image_id=instance['image_ref'],
                                  user=instance['user_id'],
                                  project=instance['project_id'],
                                  cow=FLAGS.use_cow_images)
            except Exception as exn:
                LOG.exception(_('cache image failed: %s'), exn)
                self.destroy(instance)
                #Fix: without re-raising, a failed image fetch fell
                #through and tried to attach a missing VHD below.
                raise

        try:
            self._create_vm(instance)

            if not ebs_root:
                self._create_disk(instance['name'], vhdfile)
            else:
                self._volumeops.attach_boot_volume(block_device_info,
                                                   instance_name)

            #A SCSI controller for volumes connection is created
            self._create_scsi_controller(instance['name'])

            for vif in network_info:
                mac_address = vif['address'].replace(':', '')
                self._create_nic(instance['name'], mac_address)

            LOG.debug(_('Starting VM %s '), instance_name)
            self._set_vm_state(instance['name'], 'Enabled')
            LOG.info(_('Started VM %s '), instance_name)
        except Exception as exn:
            LOG.exception(_('spawn vm failed: %s'), exn)
            self.destroy(instance)
            raise

    def _create_vm(self, instance):
        """Create a VM but don't start it."""
        instance_name = instance["name"]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]

        vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
        vs_gs_data.ElementName = instance_name
        (job, ret_val) = vs_man_svc.DefineVirtualSystem(
            [], None, vs_gs_data.GetText_(1))[1:]
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        else:
            success = (ret_val == 0)

        if not success:
            raise vmutils.HyperVException(_('Failed to create VM %s') %
                                          instance_name)

        LOG.debug(_('Created VM %s...'), instance_name)
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]

        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        vmsetting = [s for s in vmsettings
                     if s.SettingType == 3][0]  # avoid snapshots
        memsetting = vmsetting.associators(
            wmi_result_class='Msvm_MemorySettingData')[0]
        #No Dynamic Memory, so reservation, limit and quantity are identical.
        mem = long(str(instance['memory_mb']))
        memsetting.VirtualQuantity = mem
        memsetting.Reservation = mem
        memsetting.Limit = mem

        #TODO(review): the return values of the two
        #ModifyVirtualSystemResources calls below are not checked.
        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [memsetting.GetText_(1)])
        LOG.debug(_('Set memory for vm %s...'), instance_name)
        procsetting = vmsetting.associators(
            wmi_result_class='Msvm_ProcessorSettingData')[0]
        vcpus = long(instance['vcpus'])
        procsetting.VirtualQuantity = vcpus
        procsetting.Reservation = vcpus
        procsetting.Limit = 100000  # static assignment to 100%

        if FLAGS.limit_cpu_features:
            procsetting.LimitProcessorFeatures = True

        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [procsetting.GetText_(1)])
        LOG.debug(_('Set vcpus for vm %s...'), instance_name)

    def _create_scsi_controller(self, vm_name):
        """Create an iscsi controller ready to mount volumes."""
        LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
                    'attaching') % locals())
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        #Fix: check the query result before indexing; the previous
        #"is None" check after [0] was unreachable (IndexError first).
        res = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\
            AND InstanceID LIKE '%Default%'")
        if not res:
            raise vmutils.HyperVException(_('Controller not found'))
        scsicontrldefault = res[0]
        scsicontrl = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', scsicontrldefault)
        scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
        scsiresource = self._vmutils.add_virt_resource(self._conn,
            scsicontrl, vm)
        if scsiresource is None:
            raise vmutils.HyperVException(
                _('Failed to add scsi controller to VM %s') %
                vm_name)

    def _create_disk(self, vm_name, vhdfile):
        """Create a disk and attach it to the vm."""
        LOG.debug(_('Creating disk for %(vm_name)s by attaching'
                    ' disk file %(vhdfile)s') % locals())
        #Find the IDE controller for the vm.
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        ctrller = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
                   and r.Address == "0"]
        #Find the default disk drive object for the vm and clone it.
        diskdflt = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
            AND InstanceID LIKE '%Default%'")[0]
        diskdrive = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', diskdflt)
        #Set the IDE ctrller as parent.
        diskdrive.Parent = ctrller[0].path_()
        diskdrive.Address = 0
        #Add the cloned disk drive object to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            diskdrive, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add diskdrive to VM %s') %
                vm_name)
        diskdrive_path = new_resources[0]
        LOG.debug(_('New disk drive path is %s'), diskdrive_path)
        #Find the default VHD disk object.
        vhddefault = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
            InstanceID LIKE '%Default%' ")[0]

        #Clone the default and point it to the image file.
        vhddisk = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', vhddefault)
        #Set the new drive as the parent.
        vhddisk.Parent = diskdrive_path
        vhddisk.Connection = [vhdfile]

        #Add the new vhd object as a virtual hard disk to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            vhddisk, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add vhd file to VM %s') %
                vm_name)
        LOG.info(_('Created disk for %s'), vm_name)

    def _create_nic(self, vm_name, mac):
        """Create a (synthetic) nic and attach it to the vm."""
        LOG.debug(_('Creating nic for %s '), vm_name)
        #Find the vswitch that is connected to the physical nic.
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        extswitch = self._find_external_network()
        if extswitch is None:
            raise vmutils.HyperVException(_('Cannot find vSwitch'))

        vm = vms[0]
        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
        #Find the default nic and clone it to create a new nic for the vm.
        #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
        #Linux Integration Components installed.
        syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
        default_nic_data = [n for n in syntheticnics_data
                            if n.InstanceID.rfind('Default') > 0]
        new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_SyntheticEthernetPortSettingData',
            default_nic_data[0])
        #Create a port on the vswitch.
        (new_port, ret_val) = switch_svc.CreateSwitchPort(
            Name=str(uuid.uuid4()),
            FriendlyName=vm_name,
            ScopeOfResidence="",
            VirtualSwitch=extswitch.path_())
        if ret_val != 0:
            LOG.error(_('Failed creating a port on the external vswitch'))
            raise vmutils.HyperVException(_('Failed creating port for %s') %
                                          vm_name)
        ext_path = extswitch.path_()
        LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
                  % locals())
        #Connect the new nic to the new port.
        new_nic_data.Connection = [new_port]
        new_nic_data.ElementName = vm_name + ' nic'
        new_nic_data.Address = mac
        new_nic_data.StaticMacAddress = 'True'
        new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
        #Add the new nic to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            new_nic_data, vm)
        if new_resources is None:
            raise vmutils.HyperVException(_('Failed to add nic to VM %s') %
                                          vm_name)
        LOG.info(_("Created nic for %s "), vm_name)

    def _find_external_network(self):
        """Find the vswitch that is connected to the physical nic.
        Assumes only one physical nic on the host.
        Returns None when no suitable vswitch is found.
        """
        #If there are no physical nics connected to networks, return.
        if FLAGS.vswitch_name:
            LOG.debug(_("Attempting to bind NIC to %s ")
                      % FLAGS.vswitch_name)
            bound = self._conn.Msvm_VirtualSwitch(
                ElementName=FLAGS.vswitch_name)
        else:
            LOG.debug(_("No vSwitch specified, attaching to default"))
            #Fix: the query result was previously discarded, leaving
            #'bound' unassigned (NameError) in this branch.
            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
        if len(bound) == 0:
            return None
        if FLAGS.vswitch_name:
            return self._conn.Msvm_VirtualSwitch(
                ElementName=FLAGS.vswitch_name)[0]\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
        else:
            #Fix: the first port must be selected before traversing the
            #associations (the WMI query returns a list).
            return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]

    def reboot(self, instance, network_info, reboot_type):
        """Reboot the specified instance."""
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance_id=instance["id"])
        self._set_vm_state(instance_name, 'Reboot')

    def destroy(self, instance, network_info=None, cleanup=True):
        """Destroy the VM. Also destroy the associated VHD disk files.

        The 'cleanup' parameter is currently unused and kept for
        interface compatibility.
        """
        instance_name = instance["name"]
        LOG.debug(_("Got request to destroy vm %s"), instance_name)
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            return
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        #Stop the VM first.
        self._set_vm_state(instance_name, 'Disabled')
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        disks = [r for r in rasds
                 if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
        disk_files = []
        volumes = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Physical Disk Drive']
        volumes_drives_list = []
        #collect the volumes information before destroying the VM.
        for volume in volumes:
            hostResources = volume.HostResource
            drive_path = hostResources[0]
            #Appending the Msvm_Disk path
            volumes_drives_list.append(drive_path)
        #Collect disk file information before destroying the VM.
        for disk in disks:
            disk_files.extend([c for c in disk.Connection])
        #Nuke the VM. Does not destroy disks.
        (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        else:
            #Fix: 'success' was previously left unassigned for
            #unexpected return codes, raising NameError below.
            success = (ret_val == 0)
        if not success:
            raise vmutils.HyperVException(_('Failed to destroy vm %s') %
                                          instance_name)
        #Disconnect volumes
        for volume_drive in volumes_drives_list:
            self._volumeops.disconnect_volume(volume_drive)
        #Delete associated vhd disk files.
        for disk in disk_files:
            vhdfile = self._conn_cimv2.query(
                "Select * from CIM_DataFile where Name = '" +
                disk.replace("'", "''") + "'")[0]
            LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
                      % locals())
            vhdfile.Delete()

    def pause(self, instance):
        """Pause VM instance."""
        LOG.debug(_("Pause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Paused')

    def unpause(self, instance):
        """Unpause paused VM instance."""
        LOG.debug(_("Unpause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def suspend(self, instance):
        """Suspend the specified instance."""
        LOG.debug(_("Suspend instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Suspended')

    def resume(self, instance):
        """Resume the suspended VM instance."""
        LOG.debug(_("Resume instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def power_off(self, instance):
        """Power off the specified instance."""
        LOG.debug(_("Power off instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Disabled')

    def power_on(self, instance):
        """Power on the specified instance."""
        LOG.debug(_("Power on instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def _set_vm_state(self, vm_name, req_state):
        """Set the desired state of the VM.

        Raises HyperVException when the state change fails; returns
        False when the VM does not exist.
        """
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        if len(vms) == 0:
            return False
        (job, ret_val) = vms[0].RequestStateChange(
            constants.REQ_POWER_STATE[req_state])
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        elif ret_val == 0:
            success = True
        elif ret_val == 32775:
            #Invalid state for current operation. Typically means it is
            #already in the state requested
            success = True
        if success:
            LOG.info(_("Successfully changed vm state of %(vm_name)s"
                       " to %(req_state)s") % locals())
        else:
            msg = _("Failed to change vm state of %(vm_name)s"
                    " to %(req_state)s") % locals()
            LOG.error(msg)
            raise vmutils.HyperVException(msg)

    def _get_vcpu_total(self):
        """Get vcpu number of physical computer.
        :returns: the number of cpu core.
        """
        # On certain platforms, this will raise a NotImplementedError.
        try:
            return multiprocessing.cpu_count()
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of cpu, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            return 0

    def _get_memory_mb_total(self):
        """Get the total memory size(MB) of physical computer.
        :returns: the total amount of memory(MB).
        """
        total_kb = self._conn_cimv2.query(
            "SELECT TotalVisibleMemorySize FROM win32_operatingsystem")[0]\
            .TotalVisibleMemorySize
        total_mb = long(total_kb) / 1024
        return total_mb

    def _get_local_gb_total(self):
        """Get the total hdd size(GB) of physical computer.
        :returns:
            The total amount of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.
        """
        #TODO(jordanrinke): This binds to C only right now,
        #need to bind to instance dir
        total_kb = self._conn_cimv2.query(
            "SELECT Size FROM win32_logicaldisk WHERE DriveType=3")[0].Size
        total_gb = long(total_kb) / (1024 ** 3)
        return total_gb

    def _get_vcpu_used(self):
        """Get vcpu usage number of physical computer.
        :returns: The total number of vcpu that currently used.
        """
        #TODO(jordanrinke) figure out a way to count assigned VCPUs
        total_vcpu = 0
        return total_vcpu

    def _get_memory_mb_used(self):
        """Get the used memory size(MB) of physical computer.
        :returns: the total usage of memory(MB).
        """
        total_kb = self._conn_cimv2.query(
            "SELECT FreePhysicalMemory FROM win32_operatingsystem")[0]\
            .FreePhysicalMemory
        total_mb = long(total_kb) / 1024

        return total_mb

    def _get_local_gb_used(self):
        """Get the free hdd size(GB) of physical computer.
        :returns:
            The total usage of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.
        """
        #TODO(jordanrinke): This binds to C only right now,
        #need to bind to instance dir
        total_kb = self._conn_cimv2.query(
            "SELECT FreeSpace FROM win32_logicaldisk WHERE DriveType=3")[0]\
            .FreeSpace
        total_gb = long(total_kb) / (1024 ** 3)
        return total_gb

    def _get_hypervisor_version(self):
        """Get hypervisor version.
        :returns: hypervisor version (ex. 12003)
        """
        version = self._conn_cimv2.Win32_OperatingSystem()[0]\
            .Version.replace('.', '')
        LOG.info(_('Windows version: %s ') % version)
        return version

    def update_available_resource(self, context, host):
        """Updates compute manager resource info on ComputeNode table.

        This method is called as an periodic tasks and is used only
        in live migration currently.

        :param ctxt: security context
        :param host: hostname that compute manager is currently running

        """

        try:
            service_ref = db.service_get_all_compute_by_host(context, host)[0]
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=host)

        # Updating host information
        # TODO(alexpilotti) implemented cpu_info
        dic = {'vcpus': self._get_vcpu_total(),
               'memory_mb': self._get_memory_mb_total(),
               'local_gb': self._get_local_gb_total(),
               'vcpus_used': self._get_vcpu_used(),
               'memory_mb_used': self._get_memory_mb_used(),
               'local_gb_used': self._get_local_gb_used(),
               'hypervisor_type': "hyperv",
               'hypervisor_version': self._get_hypervisor_version(),
               'cpu_info': "unknown",
               'service_id': service_ref['id'],
               'disk_available_least': 1}

        compute_node_ref = service_ref['compute_node']
        if not compute_node_ref:
            LOG.info(_('Compute_service record created for %s ') % host)
            db.compute_node_create(context, dic)
        else:
            LOG.info(_('Compute_service record updated for %s ') % host)
            db.compute_node_update(context, compute_node_ref[0]['id'], dic)

    def _cache_image(self, fn, target, fname, cow=False, Size=None,
                     *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image. The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.

        NOTE(review): 'Size' is currently unused; kept for interface
        compatibility.
        """
        @utils.synchronized(fname)
        def call_if_not_exists(path, fn, *args, **kwargs):
            if not os.path.exists(path):
                fn(target=path, *args, **kwargs)

        if not os.path.exists(target):
            LOG.debug(_("use_cow_image:%s"), cow)
            if cow:
                base = self._vmutils.get_base_vhd_path(fname)
                call_if_not_exists(base, fn, *args, **kwargs)

                image_service = self._conn.query(
                    "Select * from Msvm_ImageManagementService")[0]
                (job, ret_val) = \
                    image_service.CreateDifferencingVirtualHardDisk(
                        Path=target, ParentPath=base)
                LOG.debug(
                    "Creating difference disk: JobID=%s, Source=%s, Target=%s",
                    job, base, target)
                if ret_val == constants.WMI_JOB_STATUS_STARTED:
                    success = self._vmutils.check_job_status(job)
                else:
                    success = (ret_val == 0)

                if not success:
                    raise vmutils.HyperVException(
                        _('Failed to create Difference Disk from '
                          '%(base)s to %(target)s') % locals())

            else:
                call_if_not_exists(target, fn, *args, **kwargs)
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
new file mode 100644
index 000000000..2e54e6d47
--- /dev/null
+++ b/nova/virt/hyperv/vmutils.py
@@ -0,0 +1,146 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility class for VM related operations.
+"""
+
+import os
+import shutil
+import sys
+import time
+import uuid
+
+from nova import exception
+from nova import flags
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import constants
+from nova.virt import images
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
+
+
class HyperVException(exception.NovaException):
    """Base exception for errors raised by the Hyper-V driver."""
    # The previous __init__ only forwarded 'message' to the parent
    # constructor, which is the default behavior; it has been removed.
    pass
+
+
class VMUtils(object):
    """Utility class for VM related operations, shared by the various
    Hyper-V ops classes.
    """

    def lookup(self, conn, i):
        """Return the ElementName of the VM named 'i' or None when it
        does not exist.

        Raises HyperVException when more than one VM matches the name.
        """
        vms = conn.Msvm_ComputerSystem(ElementName=i)
        n = len(vms)
        if n == 0:
            return None
        elif n > 1:
            raise HyperVException(_('duplicate name found: %s') % i)
        else:
            return vms[0].ElementName

    #TODO(alexpilotti): use the reactor to poll instead of sleep
    def check_job_status(self, jobpath):
        """Poll WMI job state for completion.

        Returns True when the job completed successfully, False
        otherwise.
        """
        job_wmi_path = jobpath.replace('\\', '/')
        job = wmi.WMI(moniker=job_wmi_path)

        while job.JobState == constants.WMI_JOB_STATE_RUNNING:
            time.sleep(0.1)
            job = wmi.WMI(moniker=job_wmi_path)
        if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
            #Fix: WMI objects are not mappings, so "%(key)s" % job raised
            #TypeError; extract the error fields into a dict first.
            job_err = {'ErrorSummaryDescription':
                       job.ErrorSummaryDescription,
                       'ErrorDescription': job.ErrorDescription,
                       'ErrorCode': job.ErrorCode}
            LOG.debug(_("WMI job failed: %(ErrorSummaryDescription)s - "
                        "%(ErrorDescription)s - %(ErrorCode)s") % job_err)
            return False
        desc = job.Description
        elap = job.ElapsedTime
        LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
                  % locals())
        return True

    def get_vhd_path(self, instance_name):
        """Return the path of the instance root VHD, creating the
        per-instance folder under FLAGS.instances_path if needed.
        """
        base_vhd_folder = os.path.join(FLAGS.instances_path, instance_name)
        if not os.path.exists(base_vhd_folder):
            LOG.debug(_('Creating folder %s '), base_vhd_folder)
            os.makedirs(base_vhd_folder)
        return os.path.join(base_vhd_folder, instance_name + ".vhd")

    def get_base_vhd_path(self, image_name):
        """Return the path of a cached base image VHD in the common
        '_base' folder, creating the folder if needed.
        """
        base_dir = os.path.join(FLAGS.instances_path, '_base')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        return os.path.join(base_dir, image_name + ".vhd")

    def make_export_path(self, instance_name):
        """Create and return the export folder for the instance,
        removing any pre-existing folder first.
        """
        export_folder = os.path.join(FLAGS.instances_path, "export",
                                     instance_name)
        if os.path.isdir(export_folder):
            LOG.debug(_('Removing existing folder %s '), export_folder)
            shutil.rmtree(export_folder)
        LOG.debug(_('Creating folder %s '), export_folder)
        os.makedirs(export_folder)
        return export_folder

    def clone_wmi_obj(self, conn, wmi_class, wmi_obj):
        """Clone a WMI object, generating a fresh GUID for the
        VirtualSystemIdentifiers property.
        """
        cl = conn.__getattr__(wmi_class)  # get the class
        newinst = cl.new()
        #Copy the properties from the original.
        for prop in wmi_obj._properties:
            if prop == "VirtualSystemIdentifiers":
                strguid = []
                strguid.append(str(uuid.uuid4()))
                newinst.Properties_.Item(prop).Value = strguid
            else:
                newinst.Properties_.Item(prop).Value = \
                    wmi_obj.Properties_.Item(prop).Value
        return newinst

    def add_virt_resource(self, conn, res_setting_data, target_vm):
        """Add a new resource (disk/nic) to the VM.

        Returns the list of new resource paths on success, None on
        failure.
        """
        vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
        (job, new_resources, ret_val) = vs_man_svc.\
            AddVirtualSystemResources([res_setting_data.GetText_(1)],
                                      target_vm.path_())
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self.check_job_status(job)
        else:
            success = (ret_val == 0)
        if success:
            return new_resources
        else:
            return None

    def remove_virt_resource(self, conn, res_setting_data, target_vm):
        """Remove a resource (disk/nic) from the VM.

        Returns True on success, False otherwise.
        """
        vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
        (job, ret_val) = vs_man_svc.\
            RemoveVirtualSystemResources([res_setting_data.path_()],
                                         target_vm.path_())
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self.check_job_status(job)
        else:
            success = (ret_val == 0)
        return success

    def fetch_image(self, target, context, image_id, user, project,
                    *args, **kwargs):
        """Download the Glance image 'image_id' to the target path."""
        images.fetch(context, image_id, target, user, project)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
new file mode 100644
index 000000000..a8e5299c0
--- /dev/null
+++ b/nova/virt/hyperv/volumeops.py
@@ -0,0 +1,297 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for Storage-related functions (attach, detach, etc).
+"""
+import time
+
+from nova import block_device
+from nova import flags
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt import driver
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutils
+
+LOG = logging.getLogger(__name__)
+
# Both options hold integer values (a retry count and a delay in seconds)
# and are consumed as integers (range()/time.sleep()), so they must be
# registered as IntOpt: with StrOpt, an operator override in nova.conf
# would arrive as a string and break the consumers.
hyper_volumeops_opts = [
    cfg.IntOpt('hyperv_attaching_volume_retry_count',
               default=10,
               help='The number of times we retry on attaching volume '),
    cfg.IntOpt('hyperv_wait_between_attach_retry',
               default=5,
               help='The seconds to wait between an volume attachment attempt'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(hyper_volumeops_opts)
+
+
class VolumeOps(baseops.BaseOps):
    """
    Management class for Volume-related tasks.

    Handles attaching/detaching iSCSI volumes to Hyper-V instances and
    resolving the Windows physical disk objects that back a given
    iSCSI target/LUN.
    """

    def __init__(self):
        super(VolumeOps, self).__init__()

        self._vmutils = vmutils.VMUtils()
        # Modules are kept as attributes so they can be stubbed in tests.
        self._driver = driver
        self._block_device = block_device
        self._time = time
        # Cached iSCSI initiator name; resolved lazily by
        # get_volume_connector().
        self._initiator = None
        self._default_root_device = 'vda'
        # Retry count and delay (seconds) used while waiting for a newly
        # logged-in disk to become visible to WMI.
        self._attaching_volume_retry_count = \
            FLAGS.hyperv_attaching_volume_retry_count
        self._wait_between_attach_retry = \
            FLAGS.hyperv_wait_between_attach_retry
        self._volutils = volumeutils.VolumeUtils()

    def attach_boot_volume(self, block_device_info, vm_name):
        """Attach the boot volume to the IDE controller.

        Logs in to the iSCSI target backing the root volume and attaches
        the resulting physical disk to IDE controller 0, slot 0 (the
        slot a VHD root disk would normally use). On any failure the
        iSCSI target is logged out and HyperVException is raised.
        """
        LOG.debug(_("block device info: %s"), block_device_info)
        ebs_root = self._driver.block_device_info_get_mapping(
            block_device_info)[0]
        connection_info = ebs_root['connection_info']
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']
        self._volutils.login_storage_target(target_lun, target_iqn,
                                            target_portal)
        try:
            #Getting the mounted disk
            mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
                                                           target_lun)
            #Attach to IDE controller
            #Find the IDE controller for the vm.
            vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
            vm = vms[0]
            vmsettings = vm.associators(
                wmi_result_class='Msvm_VirtualSystemSettingData')
            rasds = vmsettings[0].associators(
                wmi_result_class='MSVM_ResourceAllocationSettingData')
            ctrller = [r for r in rasds
                if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
                    and r.Address == "0"]
            #Attaching to the same slot as the VHD disk file
            self._attach_volume_to_controller(ctrller, 0, mounted_disk, vm)
        except Exception as exn:
            LOG.exception(_('Attach boot from volume failed: %s'), exn)
            # Clean up the iSCSI session before surfacing the error.
            self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
            raise vmutils.HyperVException(
                _('Unable to attach boot volume to instance %s')
                % vm_name)

    def volume_in_mapping(self, mount_device, block_device_info):
        """Return True if *mount_device* appears in the block device
        mapping (volumes, swap or ephemerals)."""
        return self._volutils.volume_in_mapping(mount_device,
                                                block_device_info)

    def attach_volume(self, connection_info, instance_name, mountpoint):
        """Attach a volume to the SCSI controller.

        Logs in to the iSCSI target and attaches the resulting physical
        disk to the first free slot of the instance's synthetic SCSI
        controller. On failure the target is logged out and
        HyperVException is raised.
        """
        LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
                    " %(mountpoint)s") % locals())
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']
        self._volutils.login_storage_target(target_lun, target_iqn,
                                            target_portal)
        try:
            #Getting the mounted disk
            mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
                                                           target_lun)
            #Find the SCSI controller for the vm
            vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
            vm = vms[0]
            vmsettings = vm.associators(
                wmi_result_class='Msvm_VirtualSystemSettingData')
            rasds = vmsettings[0].associators(
                wmi_result_class='MSVM_ResourceAllocationSettingData')
            ctrller = [r for r in rasds
                if r.ResourceSubType == 'Microsoft Synthetic SCSI Controller']
            self._attach_volume_to_controller(
                ctrller, self._get_free_controller_slot(ctrller[0]),
                mounted_disk, vm)
        except Exception as exn:
            LOG.exception(_('Attach volume failed: %s'), exn)
            # Clean up the iSCSI session before surfacing the error.
            self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
            raise vmutils.HyperVException(
                _('Unable to attach volume to instance %s')
                % instance_name)

    def _attach_volume_to_controller(self, controller, address, mounted_disk,
                                     instance):
        """Attach *mounted_disk* to slot *address* of *controller* on
        *instance*, by cloning the default physical disk drive resource.

        :raises: HyperVException if the resource cannot be added.
        """
        #Find the default disk drive object for the vm and clone it.
        diskdflt = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
            AND InstanceID LIKE '%Default%'")[0]
        diskdrive = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', diskdflt)
        diskdrive.Address = address
        diskdrive.Parent = controller[0].path_()
        diskdrive.HostResource = [mounted_disk[0].path_()]
        new_resources = self._vmutils.add_virt_resource(self._conn, diskdrive,
                                                        instance)
        if new_resources is None:
            raise vmutils.HyperVException(_('Failed to add volume to VM %s') %
                                          instance)

    def _get_free_controller_slot(self, scsi_controller):
        """Return the next free slot index on *scsi_controller*."""
        #Getting volumes mounted in the SCSI controller
        volumes = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
            AND Parent = '" + scsi_controller.path_() + "'")
        #Slots start from 0 and are assigned contiguously, so the number
        #of attached disks gives us the next free slot
        return len(volumes)

    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach a volume from the SCSI controller.

        Locates the physical disk resource backing the volume's
        target/LUN, removes it from the VM and logs the iSCSI target
        out.

        :raises: HyperVException if the disk cannot be found or removed.
        """
        LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
                    " %(mountpoint)s") % locals())
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        #Getting the mounted disk
        mounted_disk = self._get_mounted_disk_from_lun(target_iqn, target_lun)
        physical_list = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'")
        mounted_disk_path = str(mounted_disk[0].path_().lower())
        LOG.debug(_("Mounted disk to detach is: %s"), mounted_disk_path)
        # Find the physical disk resource whose host resource points at
        # the mounted disk. The original code used a 0 sentinel and kept
        # scanning after a match; a miss would then crash deep inside
        # remove_virt_resource instead of raising a clear error.
        physical_disk = None
        for phydisk in physical_list:
            host_resource_list = phydisk.HostResource
            if host_resource_list is None:
                continue
            host_resource = str(host_resource_list[0].lower())
            LOG.debug(_("host_resource disk detached is: %s"), host_resource)
            if host_resource == mounted_disk_path:
                physical_disk = phydisk
                break
        if physical_disk is None:
            raise vmutils.HyperVException(
                _('Unable to find the physical disk to detach from VM %s')
                % instance_name)
        LOG.debug(_("Physical disk detached is: %s"), physical_disk)
        vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
        vm = vms[0]
        remove_result = self._vmutils.remove_virt_resource(self._conn,
                                                           physical_disk, vm)
        if remove_result is False:
            raise vmutils.HyperVException(
                _('Failed to remove volume from VM %s') %
                instance_name)
        #Sending logout
        self._volutils.logout_storage_target(self._conn_wmi, target_iqn)

    def get_volume_connector(self, instance):
        """Return connector information (host IP and iSCSI initiator
        name) used by volume drivers to export a volume to this host."""
        if not self._initiator:
            self._initiator = self._get_iscsi_initiator()
            if not self._initiator:
                LOG.warn(_('Could not determine iscsi initiator name'),
                         instance=instance)
        return {
            'ip': FLAGS.my_ip,
            'initiator': self._initiator,
        }

    def _get_iscsi_initiator(self):
        """Return this host's iSCSI initiator name."""
        return self._volutils.get_iscsi_initiator(self._conn_cimv2)

    def _get_mounted_disk_from_lun(self, target_iqn, target_lun):
        """Return the Msvm_DiskDrive result set backing an iSCSI
        target/LUN.

        Scans the initiator session for *target_iqn* to find the Windows
        device number of the LUN, then polls WMI until the corresponding
        disk drive object appears (the disk may surface a moment after
        login).

        :raises: HyperVException if no device matches the LUN or the
            disk never appears.
        """
        initiator_session = self._conn_wmi.query(
            "SELECT * FROM MSiSCSIInitiator_SessionClass \
            WHERE TargetName='" + target_iqn + "'")[0]
        devices = initiator_session.Devices
        device_number = None
        for device in devices:
            LOG.debug(_("device.InitiatorName: %s"), device.InitiatorName)
            LOG.debug(_("device.TargetName: %s"), device.TargetName)
            LOG.debug(_("device.ScsiPortNumber: %s"), device.ScsiPortNumber)
            LOG.debug(_("device.ScsiPathId: %s"), device.ScsiPathId)
            LOG.debug(_("device.ScsiTargetId: %s"), device.ScsiTargetId)
            LOG.debug(_("device.ScsiLun: %s"), device.ScsiLun)
            LOG.debug(_("device.DeviceInterfaceGuid :%s"),
                      device.DeviceInterfaceGuid)
            LOG.debug(_("device.DeviceInterfaceName: %s"),
                      device.DeviceInterfaceName)
            LOG.debug(_("device.LegacyName: %s"), device.LegacyName)
            LOG.debug(_("device.DeviceType: %s"), device.DeviceType)
            LOG.debug(_("device.DeviceNumber %s"), device.DeviceNumber)
            LOG.debug(_("device.PartitionNumber :%s"), device.PartitionNumber)
            scsi_lun = device.ScsiLun
            if scsi_lun == target_lun:
                device_number = device.DeviceNumber
        if device_number is None:
            raise vmutils.HyperVException(
                _('Unable to find a mounted disk for'
                    ' target_iqn: %s') % target_iqn)
        LOG.debug(_("Device number : %s"), device_number)
        LOG.debug(_("Target lun : %s"), target_lun)
        #Finding the mounted disk drive. The disk may not be visible
        #immediately after login, so retry the full configured number of
        #times, sleeping between attempts. (The original loop used
        #range(1, n), i.e. n - 1 attempts, and then repeated the same
        #query once more after the loop.)
        mounted_disk = []
        for i in range(self._attaching_volume_retry_count):
            mounted_disk = self._conn.query(
                "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" +
                str(device_number) + "")
            LOG.debug(_("Mounted disk is: %s"), mounted_disk)
            if len(mounted_disk) > 0:
                break
            self._time.sleep(self._wait_between_attach_retry)
        if len(mounted_disk) == 0:
            raise vmutils.HyperVException(
                _('Unable to find a mounted disk for'
                    ' target_iqn: %s') % target_iqn)
        return mounted_disk

    def disconnect_volume(self, physical_drive_path):
        """Log out the iSCSI session backing *physical_drive_path*."""
        #Get the session_id of the ISCSI connection
        session_id = self._get_session_id_from_mounted_disk(
            physical_drive_path)
        #Logging out the target
        self._volutils.execute_log_out(session_id)

    def _get_session_id_from_mounted_disk(self, physical_drive_path):
        """Return the iSCSI session id owning the device at
        *physical_drive_path*, or None if no session matches."""
        drive_number = self._get_drive_number_from_disk_path(
            physical_drive_path)
        LOG.debug(_("Drive number to disconnect is: %s"), drive_number)
        initiator_sessions = self._conn_wmi.query(
            "SELECT * FROM MSiSCSIInitiator_SessionClass")
        for initiator_session in initiator_sessions:
            devices = initiator_session.Devices
            for device in devices:
                deviceNumber = str(device.DeviceNumber)
                LOG.debug(_("DeviceNumber : %s"), deviceNumber)
                if deviceNumber == drive_number:
                    return initiator_session.SessionId

    def _get_drive_number_from_disk_path(self, disk_path):
        """Extract the drive number from a WMI disk path by parsing the
        quoted DeviceID property out of the path string."""
        LOG.debug(_("Disk path to parse: %s"), disk_path)
        start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
        LOG.debug(_("start_device_id: %s"), start_device_id)
        end_device_id = disk_path.find('"', start_device_id + 1)
        LOG.debug(_("end_device_id: %s"), end_device_id)
        deviceID = disk_path[start_device_id + 1:end_device_id]
        # The DeviceID ends with "...PHYSICALDRIVE<n>"; keep what follows
        # the backslash-escaped prefix.
        return deviceID[deviceID.find("\\") + 2:]

    def get_default_root_device(self):
        """Return the default root device name exposed to instances."""
        return self._default_root_device
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
new file mode 100644
index 000000000..018a4c278
--- /dev/null
+++ b/nova/virt/hyperv/volumeutils.py
@@ -0,0 +1,122 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import subprocess
+import sys
+import time
+
+from nova import block_device
+from nova import flags
+from nova.openstack.common import log as logging
+from nova.virt import driver
+from nova.virt.hyperv import vmutils
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import _winreg
+
+LOG = logging.getLogger(__name__)
+FLAGS = flags.FLAGS
+
+
class VolumeUtils(object):
    """Helper methods for iSCSI volume plumbing on Windows.

    Wraps the ``iscsicli.exe`` command line tool plus the registry/WMI
    lookups needed to log storage targets in and out.
    """

    def execute(self, *args, **kwargs):
        """Run the command line formed by joining *args* and raise
        HyperVException unless iscsicli reports success.

        ``iscsicli.exe`` reports errors on stdout with a zero exit code,
        so the output is scanned for its success message instead of
        checking the return code.
        """
        # NOTE: the original passed [args] -- a list containing the args
        # *tuple* -- which subprocess cannot execute. Join the pieces
        # into the actual command line instead (callers pass a single
        # pre-built command string).
        cmd = ' '.join(args)
        proc = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        stdout_value, _stderr_value = proc.communicate()
        if stdout_value.find('The operation completed successfully') == -1:
            raise vmutils.HyperVException(_('An error has occurred when '
                'calling the iscsi initiator: %s') % stdout_value)

    def get_iscsi_initiator(self, cim_conn):
        """Return the iSCSI initiator name (IQN string) for this host.

        Reads the initiator name from the registry; when that fails
        (missing key, or not running on Windows) falls back to the
        conventional Microsoft default derived from the hostname.

        NOTE: callers (VolumeOps) expect the bare initiator name; the
        original returned a connector dict here, which ended up nested
        inside the 'initiator' field of the real connector.
        """
        computer_system = cim_conn.Win32_ComputerSystem()[0]
        hostname = computer_system.name
        keypath = \
            r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery"
        try:
            key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
                                  _winreg.KEY_ALL_ACCESS)
            temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
            initiator_name = str(temp[0])
            _winreg.CloseKey(key)
        except Exception:
            LOG.info(_("The ISCSI initiator name can't be found. "
                       "Choosing the default one"))
            initiator_name = "iqn.1991-05.com.microsoft:" + \
                hostname.lower()
        return initiator_name

    def login_storage_target(self, target_lun, target_iqn, target_portal):
        """Add the target portal, list targets and log in to the target.

        *target_lun* is unused but kept for caller compatibility.
        """
        separator = target_portal.find(':')
        target_address = target_portal[:separator]
        target_port = target_portal[separator + 1:]
        #Adding target portal to iscsi initiator. Sending targets
        self.execute('iscsicli.exe ' + 'AddTargetPortal ' +
                     target_address + ' ' + target_port +
                     ' * * * * * * * * * * * * *')
        #Listing targets ('ListTargets' is the iscsicli command name;
        #the original misspelled it 'LisTargets')
        self.execute('iscsicli.exe ' + 'ListTargets')
        #Sending login
        self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
        #Waiting the disk to be mounted. Research this
        time.sleep(FLAGS.hyperv_wait_between_attach_retry)

    def logout_storage_target(self, _conn_wmi, target_iqn):
        """Log out every session of the given storage target."""
        sessions = _conn_wmi.query(
            "SELECT * FROM MSiSCSIInitiator_SessionClass \
            WHERE TargetName='" + target_iqn + "'")
        for session in sessions:
            self.execute_log_out(session.SessionId)

    def execute_log_out(self, session_id):
        """Log out of the iSCSI session identified by *session_id*."""
        self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)

    def volume_in_mapping(self, mount_device, block_device_info):
        """Return True if *mount_device* appears in the block device
        mapping (volumes, swap or ephemerals)."""
        block_device_list = [block_device.strip_dev(vol['mount_device'])
                             for vol in
                             driver.block_device_info_get_mapping(
                                 block_device_info)]
        swap = driver.block_device_info_get_swap(block_device_info)
        if driver.swap_is_usable(swap):
            block_device_list.append(
                block_device.strip_dev(swap['device_name']))
        block_device_list += [block_device.strip_dev(
            ephemeral['device_name'])
            for ephemeral in
            driver.block_device_info_get_ephemerals(block_device_info)]

        LOG.debug(_("block_device_list %s"), block_device_list)
        return block_device.strip_dev(mount_device) in block_device_list