summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorArmando Migliaccio <armando.migliaccio@citrix.com>2010-11-30 19:03:13 +0000
committerArmando Migliaccio <armando.migliaccio@citrix.com>2010-11-30 19:03:13 +0000
commit40de074f44059f89caa15420a7174f63c76eec48 (patch)
tree22202f0520e98f9295dd980edd2598d7ee3061cb /nova
parent004704f0fec2280f75ec0cf9757008228b01410d (diff)
iscsi volumes attach/detach complete. There is only one minor issue on how to discover targets from device_path
Diffstat (limited to 'nova')
-rw-r--r--nova/virt/xenapi/vm_utils.py38
-rw-r--r--nova/virt/xenapi/volume_utils.py210
-rw-r--r--nova/virt/xenapi/volumeops.py93
3 files changed, 313 insertions, 28 deletions
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index b68df2791..6966e7b7b 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -104,6 +104,44 @@ class VMHelper():
defer.returnValue(vbd_ref)
@classmethod
+ @utils.deferredToThread
+ def find_vbd_by_number(self, session, vm_ref, number):
+ return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number)
+
+ @classmethod
+ def find_vbd_by_number_blocking(self, session, vm_ref, number):
+ vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
+ if vbds:
+ for vbd in vbds:
+ try:
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ if vbd_rec['userdevice'] == str(number):
+ return vbd
+ except Exception, exc:
+ logging.warn(exc)
+ raise Exception('VBD not found in instance %s' % vm_ref)
+
+ @classmethod
+ @defer.inlineCallbacks
+ def unplug_vbd(self, session, vbd_ref):
+ try:
+ vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref)
+ except Exception, exc:
+ logging.warn(exc)
+ if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
+ raise Exception('Unable to unplug VBD %s' % vbd_ref)
+
+ @classmethod
+ @defer.inlineCallbacks
+ def destroy_vbd(self, session, vbd_ref):
+ try:
+ task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref)
+ yield session.wait_for_task(task)
+ except Exception, exc:
+ logging.warn(exc)
+ raise Exception('Unable to destroy VBD %s' % vbd_ref)
+
+ @classmethod
@defer.inlineCallbacks
def create_vif(self, session, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
new file mode 100644
index 000000000..b982ac124
--- /dev/null
+++ b/nova/virt/xenapi/volume_utils.py
@@ -0,0 +1,210 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import logging
+import re
+import string
+
+from twisted.internet import defer
+
+from nova import utils
+from nova import flags
+
+FLAGS = flags.FLAGS
+
+# FIXME: replace with proper target discovery
+flags.DEFINE_string('target_host', None, 'iSCSI Target Host')
+flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default')
+flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix')
+
+
+class VolumeHelper():
+ def __init__(self, session):
+ return
+
+ @classmethod
+ @utils.deferredToThread
+ def create_iscsi_storage(self, session, target, port, target_iqn,
+ username, password, label, description):
+
+ return VolumeHelper.create_iscsi_storage_blocking(session, target,
+ port,
+ target_iqn,
+ username,
+ password,
+ label,
+ description)
+
+ @classmethod
+ def create_iscsi_storage_blocking(self, session, target, port, target_iqn,
+ username, password, label, description):
+
+ sr_ref = session.get_xenapi().SR.get_by_name_label(label)
+ if len(sr_ref) == 0:
+ logging.debug('Introducing %s...' % label)
+ try:
+ sr_ref = session.get_xenapi().SR.create(
+ session.get_xenapi_host(),
+ {'target': target,
+ 'port': port,
+ 'targetIQN': target_iqn
+ # TODO: when/if chap authentication is used
+ #'chapuser': username,
+ #'chappassword': password
+ },
+ '0', label, description, 'iscsi', '', False, {})
+ logging.debug('Introduced %s as %s.' % (label, sr_ref))
+ return sr_ref
+ except Exception, exc:
+ logging.warn(exc)
+ raise Exception('Unable to create Storage Repository')
+ else:
+ return sr_ref[0]
+
+ @classmethod
+ @defer.inlineCallbacks
+ def find_sr_from_vbd(self, session, vbd_ref):
+ vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref)
+ sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref)
+ defer.returnValue(sr_ref)
+
+ @classmethod
+ @utils.deferredToThread
+ def destroy_iscsi_storage(self, session, sr_ref):
+ VolumeHelper.destroy_iscsi_storage_blocking(session, sr_ref)
+
+ @classmethod
+ def destroy_iscsi_storage_blocking(self, session, sr_ref):
+ logging.debug("Forgetting SR %s ... ", sr_ref)
+ pbds = []
+ try:
+ pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
+ except Exception, exc:
+ logging.warn('Ignoring exception %s when getting PBDs for %s',
+ exc, sr_ref)
+ for pbd in pbds:
+ try:
+ session.get_xenapi().PBD.unplug(pbd)
+ except Exception, exc:
+ logging.warn('Ignoring exception %s when unplugging PBD %s',
+ exc, pbd)
+ try:
+ session.get_xenapi().SR.forget(sr_ref)
+ logging.debug("Forgetting SR %s done.", sr_ref)
+ except Exception, exc:
+ logging.warn('Ignoring exception %s when forgetting SR %s',
+ exc, sr_ref)
+
+ @classmethod
+ @utils.deferredToThread
+ def introduce_vdi(self, session, sr_ref):
+ return VolumeHelper.introduce_vdi_blocking(session, sr_ref)
+
+ @classmethod
+ def introduce_vdi_blocking(self, session, sr_ref):
+ try:
+ vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
+ except Exception, exc:
+ raise Exception('Unable to introduce VDI on SR %s' % sr_ref)
+ try:
+ vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
+ except Exception, exc:
+ raise Exception('Unable to get record of VDI %s on' % vdis[0])
+ else:
+ return session.get_xenapi().VDI.introduce(
+ vdi_rec['uuid'],
+ vdi_rec['name_label'],
+ vdi_rec['name_description'],
+ vdi_rec['SR'],
+ vdi_rec['type'],
+ vdi_rec['sharable'],
+ vdi_rec['read_only'],
+ vdi_rec['other_config'],
+ vdi_rec['location'],
+ vdi_rec['xenstore_data'],
+ vdi_rec['sm_config'])
+
+ @classmethod
+ def parse_volume_info(self, device_path, mountpoint):
+ # Because XCP/XS want a device number instead of a mountpoint
+ device_number = VolumeHelper.mountpoint_to_number(mountpoint)
+ volume_id = _get_volume_id(device_path)
+ target_host = _get_target_host(device_path)
+ target_port = _get_target_port(device_path)
+ target_iqn = _get_iqn(device_path)
+
+ if (device_number < 0) or \
+ (volume_id is None) or \
+ (target_host is None) or \
+ (target_iqn is None):
+ raise Exception('Unable to obtain target information %s, %s' %
+ (device_path, mountpoint))
+
+ volume_info = {}
+ volume_info['deviceNumber'] = device_number
+ volume_info['volumeId'] = volume_id
+ volume_info['targetHost'] = target_host
+ volume_info['targetPort'] = target_port
+ volume_info['targeIQN'] = target_iqn
+ return volume_info
+
+ @classmethod
+ def mountpoint_to_number(self, mountpoint):
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+ if re.match('^[hs]d[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^vd[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^[0-9]+$', mountpoint):
+ return string.atoi(mountpoint, 10)
+ else:
+ logging.warn('Mountpoint cannot be translated: %s', mountpoint)
+ return -1
+
+
+def _get_volume_id(n):
+ # FIXME: n must contain at least the volume_id
+ # /vol- is for remote volumes
+ # -vol- is for local volumes
+ # see compute/manager->setup_compute_volume
+ volume_id = n[n.find('/vol-') + 1:]
+ if volume_id == n:
+ volume_id = n[n.find('-vol-') + 1:].replace('--', '-')
+ return volume_id
+
+
+def _get_target_host(n):
+# FIXME: if n is None, fall back on flags
+ if n is None or FLAGS.target_host:
+ return FLAGS.target_host
+
+
+def _get_target_port(n):
+# FIXME: if n is None, fall back on flags
+ return FLAGS.target_port
+
+
+def _get_iqn(n):
+ # FIXME: n must contain at least the volume_id
+ volume_id = _get_volume_id(n)
+ if n is None or FLAGS.iqn_prefix:
+ return '%s:%s' % (FLAGS.iqn_prefix, volume_id)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 5aefa0611..d5c309240 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -17,6 +17,12 @@
"""
Management class for Storage-related functions (attach, detach, etc).
"""
+import logging
+
+from twisted.internet import defer
+
+from volume_utils import VolumeHelper
+from vm_utils import VMHelper
class VolumeOps(object):
@@ -25,58 +31,89 @@ class VolumeOps(object):
@defer.inlineCallbacks
def attach_volume(self, instance_name, device_path, mountpoint):
+ # Before we start, check that the VM exists
+ vm_ref = yield VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
+ raise Exception('Instance %s does not exist' % instance_name)
# NOTE: No Resource Pool concept so far
logging.debug("Attach_volume: %s, %s, %s",
instance_name, device_path, mountpoint)
- volume_info = _parse_volume_info(device_path, mountpoint)
+ vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
- target = yield self._get_target(volume_info)
- label = 'SR-%s' % volume_info['volumeId']
- description = 'Attached-to:%s' % instance_name
- # Create SR and check the physical space available for the VDI allocation
- sr_ref = yield self._create_sr(target, label, description)
- disk_size = int(target['size'])
- #disk_size = yield self._get_sr_available_space(sr_ref)
- # Create VDI and attach VBD to VM
- vm_ref = yield self._lookup(instance_name)
- logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0)))
+ label = 'SR-%s' % vol_rec['volumeId']
+ description = 'Disk-for:%s' % instance_name
+ # Create SR
+ sr_ref = yield VolumeHelper.create_iscsi_storage(self._session,
+ vol_rec['targetHost'],
+ vol_rec['targetPort'],
+ vol_rec['targeIQN'],
+ '', # no CHAP auth
+ '',
+ label,
+ description)
+ # Introduce VDI and attach VBD to VM
try:
- vdi_ref = yield self._create_vdi(sr_ref, disk_size,
- 'user', volume_info['volumeId'], '',
- False, False)
+ vdi_ref = yield VolumeHelper.introduce_vdi(self._session, sr_ref)
except Exception, exc:
logging.warn(exc)
- yield self._destroy_sr(sr_ref)
+ yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception('Unable to create VDI on SR %s for instance %s'
% (sr_ref,
instance_name))
else:
try:
- userdevice = 2 # FIXME: this depends on the numbers of attached disks
- vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False)
+ vbd_ref = yield VMHelper.create_vbd(self._session,
+ vm_ref, vdi_ref,
+ vol_rec['deviceNumber'],
+ False)
except Exception, exc:
logging.warn(exc)
- yield self._destroy_sr(sr_ref)
+ yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception('Unable to create VBD on SR %s for instance %s'
% (sr_ref,
instance_name))
else:
try:
- raise Exception('')
- task = yield self._call_xenapi('Async.VBD.plug', vbd_ref)
- yield self._wait_for_task(task)
+ #raise Exception('')
+ task = yield self._session.call_xenapi('Async.VBD.plug',
+ vbd_ref)
+ yield self._session.wait_for_task(task)
except Exception, exc:
logging.warn(exc)
- yield self._destroy_sr(sr_ref)
- raise Exception('Unable to attach volume to instance %s' % instance_name)
-
+ yield VolumeHelper.destroy_iscsi_storage(self._session,
+ sr_ref)
+ raise Exception('Unable to attach volume to instance %s' %
+ instance_name)
yield True
@defer.inlineCallbacks
def detach_volume(self, instance_name, mountpoint):
- logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint)
+ # Before we start, check that the VM exists
+ vm_ref = yield VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
+ raise Exception('Instance %s does not exist' % instance_name)
# Detach VBD from VM
- # Forget SR/PDB info associated with host
- # TODO: can we avoid destroying the SR every time we detach?
- yield True \ No newline at end of file
+ logging.debug("Detach_volume: %s, %s", instance_name, mountpoint)
+ device_number = VolumeHelper.mountpoint_to_number(mountpoint)
+ try:
+ vbd_ref = yield VMHelper.find_vbd_by_number(self._session,
+ vm_ref, device_number)
+ except Exception, exc:
+ logging.warn(exc)
+ raise Exception('Unable to locate volume %s' % mountpoint)
+ else:
+ try:
+ sr_ref = yield VolumeHelper.find_sr_from_vbd(self._session,
+ vbd_ref)
+ yield VMHelper.unplug_vbd(self._session, vbd_ref)
+ except Exception, exc:
+ logging.warn(exc)
+ raise Exception('Unable to detach volume %s' % mountpoint)
+ try:
+ yield VMHelper.destroy_vbd(self._session, vbd_ref)
+ except Exception, exc:
+ logging.warn(exc)
+ # Forget SR
+ yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ yield True