author    Sean Chen <xuchenx@gmail.com>  2013-01-10 14:42:46 -0800
committer Sean Chen <xuchenx@gmail.com>  2013-01-23 12:08:18 -0800
commit    fefea36baff5f65c56984ae27074e4ad95a3b511 (patch)
tree      c53e19145855bd0f57c2ae943821aa2b7800feaa
parent    b512bba76cbed6b602a224521736d79aae3f6c4d (diff)
VMware Compute Driver Volume Management
blueprint vmware-compute-driver

Attach and Detach iSCSI volume
Get volume connector

Change-Id: I25e0c79ffb0b762726fb931233f7beeb53092a34
-rw-r--r--  nova/virt/vmwareapi/driver.py        26
-rw-r--r--  nova/virt/vmwareapi/fake.py          25
-rw-r--r--  nova/virt/vmwareapi/io_util.py        9
-rw-r--r--  nova/virt/vmwareapi/vm_util.py      201
-rw-r--r--  nova/virt/vmwareapi/vmops.py         18
-rw-r--r--  nova/virt/vmwareapi/volume_util.py  178
-rw-r--r--  nova/virt/vmwareapi/volumeops.py    183
7 files changed, 569 insertions(+), 71 deletions(-)
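For orientation, a hedged sketch of the volume connector information the driver returns after this change: the keys come from the new volumeops.get_volume_connector() below, while the values here are hypothetical examples, not taken from a real host.

    # Illustrative only: 'ip' is CONF.vmwareapi_host_ip and 'initiator' is the
    # host iSCSI IQN looked up via volume_util.get_host_iqn(); values are made up.
    connector = {
        'ip': '192.168.1.10',
        'initiator': 'iqn.1998-01.com.vmware:esxhost-01',
    }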
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 986c4ef28..4000f1f9c 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -45,6 +45,7 @@ from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
+from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
@@ -52,7 +53,7 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
- help='URL for connection to VMware ESX host.Required if '
+ help='URL for connection to VMware ESX host. Required if '
'compute_driver is vmwareapi.VMwareESXDriver.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
@@ -109,9 +110,10 @@ class VMwareESXDriver(driver.ComputeDriver):
"and vmwareapi_host_password to use"
"compute_driver=vmwareapi.VMwareESXDriver"))
- session = VMwareAPISession(host_ip, host_username, host_password,
+ self._session = VMwareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
- self._vmops = vmops.VMwareVMOps(session)
+ self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self._vmops = vmops.VMwareVMOps(self._session)
def init_host(self, host):
"""Do the initialization that needs to be done."""
@@ -172,23 +174,21 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
- def get_volume_connector(self, _instance):
+ def get_volume_connector(self, instance):
"""Return volume connector information."""
- # TODO(vish): When volume attaching is supported, return the
- # proper initiator iqn and host.
- return {
- 'ip': CONF.vmwareapi_host_ip,
- 'initiator': None,
- 'host': None
- }
+ return self._volumeops.get_volume_connector(instance)
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach volume storage to VM instance."""
- pass
+ return self._volumeops.attach_volume(connection_info,
+ instance,
+ mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach volume storage to VM instance."""
- pass
+ return self._volumeops.detach_volume(connection_info,
+ instance,
+ mountpoint)
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 3f5041c22..27e26526f 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -140,16 +141,30 @@ class DataObject(object):
class VirtualDisk(DataObject):
"""
- Virtual Disk class. Does nothing special except setting
- __class__.__name__ to 'VirtualDisk'. Refer place where __class__.__name__
- is used in the code.
+ Virtual Disk class.
"""
- pass
+
+ def __init__(self):
+ super(VirtualDisk, self).__init__()
+ self.key = 0
+ self.unitNumber = 0
class VirtualDiskFlatVer2BackingInfo(DataObject):
"""VirtualDiskFlatVer2BackingInfo class."""
- pass
+
+ def __init__(self):
+ super(VirtualDiskFlatVer2BackingInfo, self).__init__()
+ self.thinProvisioned = False
+ self.eagerlyScrub = False
+
+
+class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
+ """VirtualDiskRawDiskMappingVer1BackingInfo class."""
+
+ def __init__(self):
+ super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
+ self.lunUuid = ""
class VirtualLsiLogicController(DataObject):
diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py
index 999e7a085..6a50c4d6e 100644
--- a/nova/virt/vmwareapi/io_util.py
+++ b/nova/virt/vmwareapi/io_util.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -57,6 +58,14 @@ class ThreadSafePipe(queue.LightQueue):
"""Put a data item in the pipe."""
self.put(data)
+ def seek(self, offset, whence=0):
+ """Set the file's current position at the offset."""
+ pass
+
+ def tell(self):
+ """Get size of the file to be read."""
+ return self.transfer_size
+
def close(self):
"""A place-holder to maintain consistency."""
pass
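A hedged reading of the io_util.py change: ThreadSafePipe gains a no-op seek() and a tell() that reports transfer_size, so code treating the pipe as a file object can learn the total size to be read. A minimal standalone sketch of that contract (the stub name is illustrative, not from the patch):

    # Illustrative only: a file-like stub mirroring the seek()/tell() contract
    # the patch adds to ThreadSafePipe.
    class SizedPipeStub(object):
        def __init__(self, transfer_size):
            self.transfer_size = transfer_size

        def seek(self, offset, whence=0):
            # Position is ignored; the pipe is a stream, not a real file.
            pass

        def tell(self):
            # Reports the total number of bytes expected to flow through.
            return self.transfer_size

    stub = SizedPipeStub(512 * 1024)
    stub.seek(0, 2)
    assert stub.tell() == 512 * 1024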
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index e03b88804..381c47193 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -18,6 +19,9 @@
The VMware API VM utility module to build SOAP object specs.
"""
+import copy
+from nova.virt.vmwareapi import vim_util
+
def build_datastore_path(datastore_name, path):
"""Build the datastore compliant path."""
@@ -42,7 +46,7 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
vif_infos, os_type="otherGuest"):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
- config_spec.name = instance.name
+ config_spec.name = instance['name']
config_spec.guestId = os_type
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
@@ -57,8 +61,8 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
- config_spec.numCPUs = int(instance.vcpus)
- config_spec.memoryMB = int(instance.memory_mb)
+ config_spec.numCPUs = int(instance['vcpus'])
+ config_spec.memoryMB = int(instance['memory_mb'])
vif_spec_list = []
for vif_info in vif_infos:
@@ -71,9 +75,9 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
return config_spec
-def create_controller_spec(client_factory, key):
+def create_controller_spec(client_factory, key, adapter_type="lsiLogic"):
"""
- Builds a Config Spec for the LSI Logic Controller's addition
+ Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
@@ -81,11 +85,16 @@ def create_controller_spec(client_factory, key):
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
- virtual_lsi = client_factory.create('ns0:VirtualLsiLogicController')
- virtual_lsi.key = key
- virtual_lsi.busNumber = 0
- virtual_lsi.sharedBus = "noSharing"
- virtual_device_config.device = virtual_lsi
+ if adapter_type == "busLogic":
+ virtual_controller = client_factory.create(
+ 'ns0:VirtualBusLogicController')
+ else:
+ virtual_controller = client_factory.create(
+ 'ns0:VirtualLsiLogicController')
+ virtual_controller.key = key
+ virtual_controller.busNumber = 0
+ virtual_controller.sharedBus = "noSharing"
+ virtual_device_config.device = virtual_controller
return virtual_device_config
@@ -142,8 +151,15 @@ def create_network_spec(client_factory, vif_info):
return network_spec
-def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
- adapter_type="lsiLogic"):
+def get_vmdk_attach_config_spec(client_factory,
+ adapter_type="lsiLogic",
+ disk_type="preallocated",
+ file_path=None,
+ disk_size=None,
+ linked_clone=False,
+ controller_key=None,
+ unit_number=None,
+ device_name=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
@@ -152,15 +168,19 @@ def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
device_config_spec = []
# For IDE devices, there are these two default controllers created in the
# VM having keys 200 and 201
- if adapter_type == "ide":
- controller_key = 200
- else:
- controller_key = -101
- controller_spec = create_controller_spec(client_factory,
- controller_key)
- device_config_spec.append(controller_spec)
+ if controller_key is None:
+ if adapter_type == "ide":
+ controller_key = 200
+ else:
+ controller_key = -101
+ controller_spec = create_controller_spec(client_factory,
+ controller_key,
+ adapter_type)
+ device_config_spec.append(controller_spec)
virtual_device_config_spec = create_virtual_disk_spec(client_factory,
- disksize, controller_key, file_path)
+ controller_key, disk_type, file_path,
+ disk_size, linked_clone,
+ unit_number, device_name)
device_config_spec.append(virtual_device_config_spec)
@@ -168,20 +188,45 @@ def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
return config_spec
-def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices):
+def get_vmdk_detach_config_spec(client_factory, device):
+ """Builds the vmdk detach config spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ device_config_spec = []
+ virtual_device_config_spec = delete_virtual_disk_spec(client_factory,
+ device)
+
+ device_config_spec.append(virtual_device_config_spec)
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def get_vmdk_path_and_adapter_type(hardware_devices):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controler_key = None
+ disk_type = None
+ unit_number = 0
adapter_type_dict = {}
for device in hardware_devices:
- if (device.__class__.__name__ == "VirtualDisk" and
- device.backing.__class__.__name__ ==
- "VirtualDiskFlatVer2BackingInfo"):
- vmdk_file_path = device.backing.fileName
- vmdk_controler_key = device.controllerKey
+ if device.__class__.__name__ == "VirtualDisk":
+ if device.backing.__class__.__name__ == \
+ "VirtualDiskFlatVer2BackingInfo":
+ vmdk_file_path = device.backing.fileName
+ vmdk_controler_key = device.controllerKey
+ if getattr(device.backing, 'thinProvisioned', False):
+ disk_type = "thin"
+ else:
+ if getattr(device.backing, 'eagerlyScrub', False):
+ disk_type = "eagerZeroedThick"
+ else:
+ disk_type = "preallocated"
+ if device.unitNumber > unit_number:
+ unit_number = device.unitNumber
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = "lsiLogic"
elif device.__class__.__name__ == "VirtualBusLogicController":
@@ -193,28 +238,59 @@ def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices):
adapter_type = adapter_type_dict.get(vmdk_controler_key, "")
- return vmdk_file_path, adapter_type
+ return (vmdk_file_path, vmdk_controler_key, adapter_type,
+ disk_type, unit_number)
+
+
+def get_rdm_disk(hardware_devices, uuid):
+ """Gets the RDM disk key."""
+ if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
+ hardware_devices = hardware_devices.VirtualDevice
+
+ for device in hardware_devices:
+ if (device.__class__.__name__ == "VirtualDisk" and
+ device.backing.__class__.__name__ ==
+ "VirtualDiskRawDiskMappingVer1BackingInfo" and
+ device.backing.lunUuid == uuid):
+ return device
-def get_copy_virtual_disk_spec(client_factory, adapter_type="lsilogic"):
+def get_copy_virtual_disk_spec(client_factory, adapter_type="lsilogic",
+ disk_type="preallocated"):
"""Builds the Virtual Disk copy spec."""
dest_spec = client_factory.create('ns0:VirtualDiskSpec')
dest_spec.adapterType = adapter_type
- dest_spec.diskType = "thick"
+ dest_spec.diskType = disk_type
return dest_spec
-def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic"):
+def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic",
+ disk_type="preallocated"):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = adapter_type
- create_vmdk_spec.diskType = "thick"
+ create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
-def create_virtual_disk_spec(client_factory, disksize, controller_key,
- file_path=None):
+def get_rdm_create_spec(client_factory, device, adapter_type="lsiLogic",
+ disk_type="rdmp"):
+ """Builds the RDM virtual disk create spec."""
+ create_vmdk_spec = client_factory.create('ns0:DeviceBackedVirtualDiskSpec')
+ create_vmdk_spec.adapterType = adapter_type
+ create_vmdk_spec.diskType = disk_type
+ create_vmdk_spec.device = device
+ return create_vmdk_spec
+
+
+def create_virtual_disk_spec(client_factory, controller_key,
+ disk_type="preallocated",
+ file_path=None,
+ disk_size=None,
+ linked_clone=False,
+ unit_number=None,
+ device_name=None):
"""
Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
@@ -222,26 +298,40 @@ def create_virtual_disk_spec(client_factory, disksize, controller_key,
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
- if file_path is None:
+ if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
- disk_file_backing = client_factory.create(
- 'ns0:VirtualDiskFlatVer2BackingInfo')
- disk_file_backing.diskMode = "persistent"
- disk_file_backing.thinProvisioned = False
- if file_path is not None:
- disk_file_backing.fileName = file_path
+ if disk_type == "rdm" or disk_type == "rdmp":
+ disk_file_backing = client_factory.create(
+ 'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
+ disk_file_backing.compatibilityMode = "virtualMode" \
+ if disk_type == "rdm" else "physicalMode"
+ disk_file_backing.diskMode = "independent_persistent"
+ disk_file_backing.deviceName = device_name or ""
else:
- disk_file_backing.fileName = ""
+ disk_file_backing = client_factory.create(
+ 'ns0:VirtualDiskFlatVer2BackingInfo')
+ disk_file_backing.diskMode = "persistent"
+ if disk_type == "thin":
+ disk_file_backing.thinProvisioned = True
+ else:
+ if disk_type == "eagerZeroedThick":
+ disk_file_backing.eagerlyScrub = True
+ disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
- virtual_disk.backing = disk_file_backing
+ if not linked_clone:
+ virtual_disk.backing = disk_file_backing
+ else:
+ virtual_disk.backing = copy.copy(disk_file_backing)
+ virtual_disk.backing.fileName = ""
+ virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
# The Server assigns a Key to the device. Here we pass a -ve random key.
@@ -249,14 +339,27 @@ def create_virtual_disk_spec(client_factory, disksize, controller_key,
# want a clash with the key that server might associate with the device
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
- virtual_disk.unitNumber = 0
- virtual_disk.capacityInKB = disksize
+ virtual_disk.unitNumber = unit_number or 0
+ virtual_disk.capacityInKB = disk_size or 0
virtual_device_config.device = virtual_disk
return virtual_device_config
+def delete_virtual_disk_spec(client_factory, device):
+ """
+ Builds spec for the deletion of an already existing Virtual Disk from VM.
+ """
+ virtual_device_config = client_factory.create(
+ 'ns0:VirtualDeviceConfigSpec')
+ virtual_device_config.operation = "remove"
+ virtual_device_config.fileOperation = "destroy"
+ virtual_device_config.device = device
+
+ return virtual_device_config
+
+
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
@@ -318,3 +421,13 @@ def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
+
+
+def get_vm_ref_from_name(session, vm_name):
+ """Get reference to the VM with the name specified."""
+ vms = session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ for vm in vms:
+ if vm.propSet[0].val == vm_name:
+ return vm.obj
+ return None
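As a quick reference (hedged, derived from the create_virtual_disk_spec() changes above), these are the disk_type values the reworked vm_util spec builders now distinguish and the backing each one selects:

    # Illustrative summary only; see create_virtual_disk_spec() above.
    backing_for_disk_type = {
        'preallocated':     'VirtualDiskFlatVer2BackingInfo',
        'thin':             'VirtualDiskFlatVer2BackingInfo, thinProvisioned=True',
        'eagerZeroedThick': 'VirtualDiskFlatVer2BackingInfo, eagerlyScrub=True',
        'rdm':              'VirtualDiskRawDiskMappingVer1BackingInfo, virtualMode',
        'rdmp':             'VirtualDiskRawDiskMappingVer1BackingInfo, physicalMode',
    }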
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 883e751a8..625d6290e 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -309,8 +309,8 @@ class VMwareVMOps(object):
"""
vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
client_factory,
- vmdk_file_size_in_kb, uploaded_vmdk_path,
- adapter_type)
+ adapter_type, "preallocated",
+ uploaded_vmdk_path, vmdk_file_size_in_kb)
LOG.debug(_("Reconfiguring VM instance to attach the image disk"),
instance=instance)
reconfig_task = self._session._call_method(
@@ -361,19 +361,19 @@ class VMwareVMOps(object):
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
- _vmdk_info = vm_util.get_vmdk_file_path_and_adapter_type(
- client_factory, hardware_devices)
- vmdk_file_path_before_snapshot, adapter_type = _vmdk_info
+ (vmdk_file_path_before_snapshot, controller_key, adapter_type,
+ disk_type, unit_number) = vm_util.get_vmdk_path_and_adapter_type(
+ hardware_devices)
datastore_name = vm_util.split_datastore_path(
vmdk_file_path_before_snapshot)[0]
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
- return (vmdk_file_path_before_snapshot, adapter_type,
+ return (vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type)
- (vmdk_file_path_before_snapshot, adapter_type, datastore_name,
- os_type) = _get_vm_and_vmdk_attribs()
+ (vmdk_file_path_before_snapshot, adapter_type, disk_type,
+ datastore_name, os_type) = _get_vm_and_vmdk_attribs()
def _create_vm_snapshot():
# Create a snapshot of the VM
@@ -384,7 +384,7 @@ class VMwareVMOps(object):
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.name,
description="Taking Snapshot of the VM",
- memory=True,
+ memory=False,
quiesce=True)
self._session._wait_for_task(instance['uuid'], snapshot_task)
LOG.debug(_("Created Snapshot of the VM instance"),
diff --git a/nova/virt/vmwareapi/volume_util.py b/nova/virt/vmwareapi/volume_util.py
new file mode 100644
index 000000000..9d556cd26
--- /dev/null
+++ b/nova/virt/vmwareapi/volume_util.py
@@ -0,0 +1,178 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import re
+import string
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.virt.vmwareapi import vim_util
+
+LOG = logging.getLogger(__name__)
+
+
+class StorageError(Exception):
+ """To raise errors related to Volume commands."""
+
+ def __init__(self, message=None):
+ super(StorageError, self).__init__(message)
+
+
+def get_host_iqn(session):
+ """
+ Return the host iSCSI IQN.
+ """
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ hbas_ret = session._call_method(vim_util, "get_dynamic_property",
+ host_mor, "HostSystem",
+ "config.storageDevice.hostBusAdapter")
+
+ # Meaning there are no host bus adapters on the host
+ if not hbas_ret:
+ return
+ host_hbas = hbas_ret.HostHostBusAdapter
+ for hba in host_hbas:
+ if hba.__class__.__name__ == 'HostInternetScsiHba':
+ return hba.iScsiName
+
+
+def find_st(session, data):
+ """
+ Return the iSCSI Target given a volume info.
+ """
+ target_portal = data['target_portal']
+ target_iqn = data['target_iqn']
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+
+ lst_properties = ["config.storageDevice.hostBusAdapter",
+ "config.storageDevice.scsiTopology",
+ "config.storageDevice.scsiLun"]
+ props = session._call_method(vim_util, "get_object_properties",
+ None, host_mor, "HostSystem",
+ lst_properties)
+ result = (None, None)
+ hbas_ret = None
+ scsi_topology = None
+ scsi_lun_ret = None
+ for elem in props:
+ for prop in elem.propSet:
+ if prop.name == "config.storageDevice.hostBusAdapter":
+ hbas_ret = prop.val
+ elif prop.name == "config.storageDevice.scsiTopology":
+ scsi_topology = prop.val
+ elif prop.name == "config.storageDevice.scsiLun":
+ scsi_lun_ret = prop.val
+
+ # Meaning there are no host bus adapters on the host
+ if hbas_ret is None:
+ return result
+ host_hbas = hbas_ret.HostHostBusAdapter
+ if not host_hbas:
+ return result
+ for hba in host_hbas:
+ if hba.__class__.__name__ == 'HostInternetScsiHba':
+ hba_key = hba.key
+ break
+ else:
+ return result
+
+ if scsi_topology is None:
+ return result
+ host_adapters = scsi_topology.adapter
+ if not host_adapters:
+ return result
+ scsi_lun_key = None
+ for adapter in host_adapters:
+ if adapter.adapter == hba_key:
+ if not getattr(adapter, 'target', None):
+ return result
+ for target in adapter.target:
+ if (getattr(target.transport, 'address', None) and
+ target.transport.address[0] == target_portal and
+ target.transport.iScsiName == target_iqn):
+ if not target.lun:
+ return result
+ for lun in target.lun:
+ if 'host.ScsiDisk' in lun.scsiLun:
+ scsi_lun_key = lun.scsiLun
+ break
+ break
+ break
+
+ if scsi_lun_key is None:
+ return result
+
+ if scsi_lun_ret is None:
+ return result
+ host_scsi_luns = scsi_lun_ret.ScsiLun
+ if not host_scsi_luns:
+ return result
+ for scsi_lun in host_scsi_luns:
+ if scsi_lun.key == scsi_lun_key:
+ return (scsi_lun.deviceName, scsi_lun.uuid)
+
+ return result
+
+
+def rescan_iscsi_hba(session):
+ """
+ Rescan the iSCSI HBA to discover iSCSI targets.
+ """
+ # There is only one default storage system in a standalone ESX host
+ storage_system_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem", ["configManager.storageSystem"])[0].propSet[0].val
+ hbas_ret = session._call_method(vim_util,
+ "get_dynamic_property",
+ storage_system_mor,
+ "HostStorageSystem",
+ "storageDeviceInfo.hostBusAdapter")
+ # Meaning there are no host bus adapters on the host
+ if hbas_ret is None:
+ return
+ host_hbas = hbas_ret.HostHostBusAdapter
+ if not host_hbas:
+ return
+ for hba in host_hbas:
+ if hba.__class__.__name__ == 'HostInternetScsiHba':
+ hba_device = hba.device
+ break
+ else:
+ return
+
+ LOG.debug(_("Rescanning HBA %s") % hba_device)
+ session._call_method(session._get_vim(), "RescanHba", storage_system_mor,
+ hbaDevice=hba_device)
+ LOG.debug(_("Rescanned HBA %s ") % hba_device)
+
+
+def mountpoint_to_number(mountpoint):
+ """Translate a mountpoint like /dev/sdc into a numeric."""
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+ if re.match('^[hsv]d[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^[0-9]+$', mountpoint):
+ return string.atoi(mountpoint, 10)
+ else:
+ LOG.warn(_("Mountpoint cannot be translated: %s") % mountpoint)
+ return -1
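For reference, a minimal standalone sketch of the mountpoint translation performed by the new volume_util.mountpoint_to_number() above (an illustrative re-statement only; it uses int() in place of string.atoi()):

    # Illustrative only: mirrors volume_util.mountpoint_to_number() above.
    import re

    def mountpoint_to_unit(mountpoint):
        if mountpoint.startswith('/dev/'):
            mountpoint = mountpoint[5:]
        if re.match('^[hsv]d[a-p]$', mountpoint):
            return ord(mountpoint[2]) - ord('a')
        if re.match('^[0-9]+$', mountpoint):
            return int(mountpoint)
        return -1

    assert mountpoint_to_unit('/dev/sdc') == 2
    assert mountpoint_to_unit('/dev/vdb') == 1
    assert mountpoint_to_unit('3') == 3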
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
new file mode 100644
index 000000000..5ec389f80
--- /dev/null
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -0,0 +1,183 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for Storage-related functions (attach, detach, etc).
+"""
+
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import volume_util
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class VMwareVolumeOps(object):
+ """
+ Management class for Volume-related tasks
+ """
+
+ def __init__(self, session):
+ self._session = session
+
+ def attach_disk_to_vm(self, vm_ref, instance_name,
+ adapter_type, disk_type, vmdk_path=None,
+ disk_size=None, linked_clone=False,
+ controller_key=None, unit_number=None,
+ device_name=None):
+ """
+ Attach disk to VM by reconfiguration.
+ """
+ client_factory = self._session._get_vim().client.factory
+ vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
+ client_factory, adapter_type, disk_type,
+ vmdk_path, disk_size, linked_clone,
+ controller_key, unit_number, device_name)
+
+ LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach "
+ "disk %(vmdk_path)s or device %(device_name)s with type "
+ "%(disk_type)s") % locals())
+ reconfig_task = self._session._call_method(
+ self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vmdk_attach_config_spec)
+ self._session._wait_for_task(instance_name, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach "
+ "disk %(vmdk_path)s or device %(device_name)s with type "
+ "%(disk_type)s") % locals())
+
+ def detach_disk_from_vm(self, vm_ref, instance_name, device):
+ """
+ Detach disk from VM by reconfiguration.
+ """
+ client_factory = self._session._get_vim().client.factory
+ vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(
+ client_factory, device)
+ disk_key = device.key
+ LOG.debug(_("Reconfiguring VM instance %(instance_name)s to detach "
+ "disk %(disk_key)s") % locals())
+ reconfig_task = self._session._call_method(
+ self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vmdk_detach_config_spec)
+ self._session._wait_for_task(instance_name, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %(instance_name)s to detach "
+ "disk %(disk_key)s") % locals())
+
+ def discover_st(self, data):
+ """Discover iSCSI targets."""
+ target_portal = data['target_portal']
+ target_iqn = data['target_iqn']
+ LOG.debug(_("Discovering iSCSI target %(target_iqn)s from "
+ "%(target_portal)s.") % locals())
+ device_name, uuid = volume_util.find_st(self._session, data)
+ if device_name:
+ LOG.debug(_("Storage target found. No need to discover"))
+ return (device_name, uuid)
+ # Rescan iSCSI HBA
+ volume_util.rescan_iscsi_hba(self._session)
+ # Find iSCSI Target again
+ device_name, uuid = volume_util.find_st(self._session, data)
+ if device_name:
+ LOG.debug(_("Discovered iSCSI target %(target_iqn)s from "
+ "%(target_portal)s.") % locals())
+ else:
+ LOG.debug(_("Unable to discovered iSCSI target %(target_iqn)s "
+ "from %(target_portal)s.") % locals())
+ return (device_name, uuid)
+
+ def get_volume_connector(self, instance):
+ """Return volume connector information."""
+ iqn = volume_util.get_host_iqn(self._session)
+ return {
+ 'ip': CONF.vmwareapi_host_ip,
+ 'initiator': iqn
+ }
+
+ def attach_volume(self, connection_info, instance, mountpoint):
+ """Attach volume storage to VM instance."""
+ instance_name = instance['name']
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance_name)
+ # Attach Volume to VM
+ LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s, "
+ "%(mountpoint)s") % locals())
+ driver_type = connection_info['driver_volume_type']
+ if driver_type not in ['iscsi']:
+ raise exception.VolumeDriverNotFound(driver_type=driver_type)
+ data = connection_info['data']
+ mount_unit = volume_util.mountpoint_to_number(mountpoint)
+
+ # Discover iSCSI Target
+ device_name, uuid = self.discover_st(data)
+ if device_name is None:
+ raise volume_util.StorageError(_("Unable to find iSCSI Target"))
+
+ # Get the vmdk file name that the VM is pointing to
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ vmdk_file_path, controller_key, adapter_type, disk_type, unit_number \
+ = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)
+ # Figure out the correct unit number
+ if unit_number < mount_unit:
+ unit_number = mount_unit
+ else:
+ unit_number = unit_number + 1
+ self.attach_disk_to_vm(vm_ref, instance_name,
+ adapter_type, disk_type="rdmp",
+ controller_key=controller_key,
+ unit_number=unit_number,
+ device_name=device_name)
+ LOG.info(_("Mountpoint %(mountpoint)s attached to "
+ "instance %(instance_name)s") % locals())
+
+ def detach_volume(self, connection_info, instance, mountpoint):
+ """Detach volume storage to VM instance."""
+ instance_name = instance['name']
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance_name)
+ # Detach Volume from VM
+ LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
+ % locals())
+ driver_type = connection_info['driver_volume_type']
+ if driver_type not in ['iscsi']:
+ raise exception.VolumeDriverNotFound(driver_type=driver_type)
+ data = connection_info['data']
+
+ # Discover iSCSI Target
+ device_name, uuid = volume_util.find_st(self._session, data)
+ if device_name is None:
+ raise volume_util.StorageError(_("Unable to find iSCSI Target"))
+
+ # Get the vmdk file name that the VM is pointing to
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ device = vm_util.get_rdm_disk(hardware_devices, uuid)
+ if device is None:
+ raise volume_util.StorageError(_("Unable to find volume"))
+ self.detach_disk_from_vm(vm_ref, instance_name, device)
+ LOG.info(_("Mountpoint %(mountpoint)s detached from "
+ "instance %(instance_name)s") % locals())