From 9ca0b3435d93a87407ca42a853562cd06aaa896e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 22 Nov 2010 12:57:03 +0000 Subject: added placeholders --- nova/virt/xenapi.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 0f563aa41..4ed5d047f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -296,6 +296,14 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + + @defer.inlineCallbacks + def attach_volume(self, instance_name, device_path, mountpoint): + return True + + @defer.inlineCallbacks + def detach_volume(self, instance_name, mountpoint): + return True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) -- cgit From 9f722a0bcdb987c228f4ebf1e42c904a26d0ef73 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 10:42:06 +0000 Subject: first cut of changes for the attach_volume call --- nova/virt/xenapi.py | 94 +++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 4ed5d047f..ec5e7456a 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -63,6 +63,9 @@ from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from xml.dom.minidom import parseString + + XenAPI = None @@ -90,7 +93,7 @@ XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME + 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} @@ -126,7 +129,7 @@ class XenAPIConnection(object): def spawn(self, instance): vm = yield self._lookup(instance.name) if vm is not None: - raise Exception('Attempted to create non-unique name %s' % + raise Exception('Attempted to create non-unique name %s' % instance.name) network = db.project_get_network(None, instance.project_id) @@ -296,14 +299,34 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - + @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): - return True + # NOTE: No Resource Pool concept so far + logging.debug("Attach_volume: %s, %s, %s", + instance_name, device_path, mountpoint) + volume_info = _parse_volume_info(device_path, mountpoint) + # Create the iSCSI SR, and the PDB through which hosts access SRs. + # But first, retrieve target info, like Host, IQN, LUN and SCSIID + target = yield self._get_target(volume_info) + label = 'SR-%s' % volume_info['volumeId'] + sr_ref = yield self._create_sr(target, label) + # Create VDI and attach VBD to VM + vm = None + try: + task = yield self._call_xenapi('', vm) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - return True + logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Detach VBD from VM + # Forget SR/PDB info associated with host + # TODO: can we avoid destroying the SR every time we detach? 
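The _get_target helper introduced further down in this patch discovers the iSCSI target by calling SR.create with probe parameters and then parsing the XML that XenAPI returns inside the SR_BACKEND_FAILURE_107 error (exc.details[3]). Below is a minimal standalone sketch of just that parsing step; the parsing logic mirrors _get_target_blocking, but the sample payload is illustrative: only the iscsi-target and LUN tags and the size key are taken from this series, while the wrapper element, LUNid and the SCSIid value are assumptions.

# Illustrative sketch, not part of the patch: parse the XML carried in
# exc.details[3] of an SR_BACKEND_FAILURE_107 failure. Element names not
# referenced by the patch (the root element, LUNid, the SCSIid value) are
# assumptions made for the sample payload only.
from xml.dom.minidom import parseString

SAMPLE_107_PAYLOAD = """\
<iscsi-target-iqns>
  <iscsi-target>
    <LUN>
      <LUNid>0</LUNid>
      <size>1073741824</size>
      <SCSIid>149455400000000000000000001000000</SCSIid>
    </LUN>
  </iscsi-target>
</iscsi-target-iqns>"""


def parse_target_info(xml_payload):
    """Extract the single LUN's properties, as _get_target_blocking does."""
    doc = parseString(xml_payload)
    targets = doc.getElementsByTagName('iscsi-target')
    if len(targets) > 1:
        raise Exception('More than one ISCSI Target available')
    luns = targets.item(0).getElementsByTagName('LUN')
    if len(luns) > 1:
        raise Exception('More than one ISCSI Lun available')
    info = {}
    for node in luns.item(0).childNodes:
        if node.nodeType == 1:  # element nodes only, skip whitespace text
            info[node.nodeName] = str(node.firstChild.data).strip()
    return info

print(parse_target_info(SAMPLE_107_PAYLOAD))
# prints the LUN properties as a dict of strings, e.g. the 'size' key
# that the subsequent SR.create call in this patch relies on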
+ yield True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) @@ -333,6 +356,52 @@ class XenAPIConnection(object): else: return vms[0] + @utils.deferredToThread + def _get_target(self, volume_info): + return self._get_target_blocking(volume_info) + + def _get_target_blocking(self, volume_info): + target = {} + target['target'] = volume_info['targetHost'] + target['port'] = volume_info['targetPort'] + target['targetIQN'] = volume_info['iqn'] + # We expect SR_BACKEND_FAILURE_107 to retrieve params to create the SR + try: + self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, '-1', '', '', + 'lvmoiscsi', '', False, {}) + except XenAPI.Failure, exc: + if exc.details[0] == 'SR_BACKEND_FAILURE_107': + xml_response = parseString(exc.details[3]) + isciTargets = xml_response.getElementsByTagName('iscsi-target') + # Make sure that only the correct Lun is visible + if len(isciTargets) > 1: + raise Exception('More than one ISCSI Target available') + isciLuns = isciTargets.item(0).getElementsByTagName('LUN') + if len(isciLuns) > 1: + raise Exception('More than one ISCSI Lun available') + # Parse params from the xml response into the dictionary + for n in isciLuns.item(0).childNodes: + if n.nodeType == 1: + target[n.nodeName] = str(n.firstChild.data).strip() + return target + + @utils.deferredToThread + def _create_sr(self, target, label): + return self._create_sr_blocking(target, label) + + def _create_sr_blocking(self, target, label): + # TODO: we might want to put all these string literals into constants + sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, + target['size'], + label, + '', + 'lvmoiscsi', + '', + True, {}) + return sr + def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" @@ -412,7 +481,18 @@ def _parse_xmlrpc_value(val): if not val: return val x = xmlrpclib.loads( - '' + - val + + '' + + val + '') return x[0][0] + + +def _parse_volume_info(device_path, mountpoint): + volume_info = {} + volume_info['volumeId'] = 'vol-qurmrzn9' + # XCP and XenServer add an x to the device name + volume_info['xenMountpoint'] = '/dev/xvdb' + volume_info['targetHost'] = '10.70.177.40' + volume_info['targetPort'] = '3260' # default 3260 + volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' + return volume_info -- cgit From 688d564668aefa4b644236421a3a45fc90486634 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:31:32 +0000 Subject: work on attach_volume, with a few things to iron out --- nova/virt/xenapi.py | 99 ++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 16 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 5bf98468e..d3167ebf3 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -147,7 +147,7 @@ class XenAPIConnection(object): vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) + yield self._create_vbd(vm_ref, vdi_ref, 0, True, True, False) if network_ref: yield self._create_vif(vm_ref, network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) @@ -197,7 +197,21 @@ class XenAPIConnection(object): defer.returnValue(vm_ref) @defer.inlineCallbacks - def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + vdi_rec = {} + vdi_rec['read_only'] = read_only + vdi_rec['SR'] = sr_ref + vdi_rec['virtual_size'] = str(size) + vdi_rec['name_label'] = label + vdi_rec['name_description'] = description + vdi_rec['sharable'] = sharable + vdi_rec['type'] = type + vdi_rec['other_config'] = {} + vdi_ref = yield self._call_xenapi('VDI.create', vdi_rec) + defer.returnValue(vdi_ref) + + @defer.inlineCallbacks + def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable, unpluggable, empty): """Create a VBD record. 
Returns a Deferred that gives the new VBD reference.""" @@ -208,8 +222,8 @@ class XenAPIConnection(object): vbd_rec['bootable'] = bootable vbd_rec['mode'] = 'RW' vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False + vbd_rec['unpluggable'] = unpluggable + vbd_rec['empty'] = empty vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} @@ -320,14 +334,33 @@ class XenAPIConnection(object): # But first, retrieve target info, like Host, IQN, LUN and SCSIID target = yield self._get_target(volume_info) label = 'SR-%s' % volume_info['volumeId'] - sr_ref = yield self._create_sr(target, label) + description = 'Attached-to:%s' % instance_name + # Create SR and check the physical space available for the VDI allocation + sr_ref = yield self._create_sr(target, label, description) + disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM - vm = None + vm_ref = yield self._lookup(instance_name) + logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) try: - task = yield self._call_xenapi('', vm) - yield self._wait_for_task(task) + vdi_ref = yield self._create_vdi(sr_ref, disk_size, + 'user', volume_info['volumeId'], '', + False, False) except Exception, exc: logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s' % sr_ref) + else: + try: + userdevice = 2 # FIXME: this depends on the numbers of attached disks + vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s' % sr_ref) yield True @defer.inlineCallbacks @@ -395,23 +428,57 @@ class XenAPIConnection(object): if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() return target + + @utils.deferredToThread + def _get_sr_available_space(self, sr_ref): + return self._get_sr_available_space_blocking(sr_ref) + + def _get_sr_available_space_blocking(self, sr_ref): + pu = self._conn.xenapi.SR.get_physical_utilisation(sr_ref) + ps = self._conn.xenapi.SR.get_physical_size(sr_ref) + return (int(ps) - int(pu)) - (8 * 1024 * 1024) @utils.deferredToThread - def _create_sr(self, target, label): - return self._create_sr_blocking(target, label) + def _create_sr(self, target, label, description): + return self._create_sr_blocking(target, label, description) - def _create_sr_blocking(self, target, label): + def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, - '', + description, 'lvmoiscsi', '', True, {}) - return sr + # TODO: there might be some timing issues here + self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + @defer.inlineCallbacks + def _destroy_sr(self, sr_ref): + # Some clean-up depending on the state of the SR + #yield self._destroy_vdbs(sr_ref) + #yield self._destroy_vdis(sr_ref) + # Destroy PDBs + pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) + for pbd_ref in pbds: + try: + task = yield self._call_xenapi('Async.PBD.unplug', pbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + else: + task = yield 
self._call_xenapi('Async.PBD.destroy', pbd_ref) + yield self._wait_for_task(task) + # Forget SR + try: + task = yield self._call_xenapi('Async.SR.forget', sr_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -524,8 +591,8 @@ def _parse_xmlrpc_value(val): def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' - # XCP and XenServer add an x to the device name - volume_info['xenMountpoint'] = '/dev/xvdb' + # Because XCP/XS want an x beforehand + volume_info['xenMountpoint'] = '/dev/xvdc' volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' -- cgit From a9b900d24020b68284e402a98ee28c107de0bd71 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:42:22 +0000 Subject: added attach_volume implementation --- nova/virt/xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index d3167ebf3..236360f11 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -593,7 +593,7 @@ def _parse_volume_info(device_path, mountpoint): volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '10.70.177.40' + volume_info['targetHost'] = '' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 04b1740c991d6d499364c21c2524c46ed5fc2522 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 29 Nov 2010 17:26:44 +0000 Subject: changes --- nova/virt/xenapi.py | 93 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 25 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 236360f11..f2ba71306 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -198,6 +198,9 @@ class XenAPIConnection(object): @defer.inlineCallbacks def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + """Create a VDI record. 
Returns a Deferred that gives the new + VDI reference.""" + vdi_rec = {} vdi_rec['read_only'] = read_only vdi_rec['SR'] = sr_ref @@ -337,7 +340,8 @@ class XenAPIConnection(object): description = 'Attached-to:%s' % instance_name # Create SR and check the physical space available for the VDI allocation sr_ref = yield self._create_sr(target, label, description) - disk_size = yield self._get_sr_available_space(sr_ref) + disk_size = int(target['size']) + #disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM vm_ref = yield self._lookup(instance_name) logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) @@ -347,20 +351,30 @@ class XenAPIConnection(object): False, False) except Exception, exc: logging.warn(exc) - if sr_ref: - yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VDI on SR %s' % sr_ref) + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s for instance %s' + % (sr_ref, + instance_name)) else: - try: + try: userdevice = 2 # FIXME: this depends on the numbers of attached disks vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s for instance %s' + % (sr_ref, + instance_name)) + else: + try: + raise Exception('') + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VBD on SR %s' % sr_ref) + raise Exception('Unable to attach volume to instance %s' % instance_name) + yield True @defer.inlineCallbacks @@ -412,7 +426,7 @@ class XenAPIConnection(object): try: self._conn.xenapi.SR.create(self._get_xenapi_host(), target, '-1', '', '', - 'lvmoiscsi', '', False, {}) + 'iscsi', '', False, {}) except XenAPI.Failure, exc: if exc.details[0] == 'SR_BACKEND_FAILURE_107': xml_response = parseString(exc.details[3]) @@ -427,6 +441,9 @@ class XenAPIConnection(object): for n in isciLuns.item(0).childNodes: if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() + else: + logging.warn(exc) + raise Exception('Unable to access SR') return target @utils.deferredToThread @@ -444,24 +461,45 @@ class XenAPIConnection(object): def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.get_by_name_label(label) + if sr_ref is None: + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, description, - 'lvmoiscsi', + 'iscsi', '', True, {}) - # TODO: there might be some timing issues here - self._conn.xenapi.SR.scan(sr_ref) - return sr_ref + if sr_ref: + #self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + else: + raise Exception('Unable to create SR') @defer.inlineCallbacks def _destroy_sr(self, sr_ref): # Some clean-up depending on the state of the SR - #yield self._destroy_vdbs(sr_ref) - #yield self._destroy_vdis(sr_ref) - # Destroy PDBs + # Remove VBDs + #vbds = yield self._conn.xenapi.SR.get_VBDs(sr_ref) + #for vbd_ref in vbds: + # try: + # task = yield self._call_xenapi('Async.VBD.destroy', vbd_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + # 
Remove VDIs + #======================================================================= + # vdis = yield self._conn.xenapi.SR.get_VDIs(sr_ref) + # for vdi_ref in vdis: + # try: + # task = yield self._call_xenapi('Async.VDI.destroy', vdi_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + #======================================================================= + sr_rec = self._conn.xenapi.SR.get_record(sr_ref) + # Detach from host pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) for pbd_ref in pbds: try: @@ -469,16 +507,21 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - else: - task = yield self._call_xenapi('Async.PBD.destroy', pbd_ref) - yield self._wait_for_task(task) - # Forget SR + # Destroy SR try: - task = yield self._call_xenapi('Async.SR.forget', sr_ref) + task = yield self._call_xenapi('Async.SR.destroy', sr_ref) yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + def _sr_dispose_action(self, action, records): + for rec_ref in records: + try: + task = yield self._call_xenapi(action, rec_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -592,8 +635,8 @@ def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand - volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '' + volume_info['mountpoint'] = '/dev/xvdc' # translate + volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 40de074f44059f89caa15420a7174f63c76eec48 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 30 Nov 2010 19:03:13 +0000 Subject: iscsi volumes attach/detach complete. 
There is only one minor issue on how to discover targets from device_path --- nova/virt/xenapi/vm_utils.py | 38 +++++++ nova/virt/xenapi/volume_utils.py | 210 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volumeops.py | 93 +++++++++++------ 3 files changed, 313 insertions(+), 28 deletions(-) create mode 100644 nova/virt/xenapi/volume_utils.py diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b68df2791..6966e7b7b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -103,6 +103,44 @@ class VMHelper(): vdi_ref) defer.returnValue(vbd_ref) + @classmethod + @utils.deferredToThread + def find_vbd_by_number(self, session, vm_ref, number): + return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) + + @classmethod + def find_vbd_by_number_blocking(self, session, vm_ref, number): + vbds = session.get_xenapi().VM.get_VBDs(vm_ref) + if vbds: + for vbd in vbds: + try: + vbd_rec = session.get_xenapi().VBD.get_record(vbd) + if vbd_rec['userdevice'] == str(number): + return vbd + except Exception, exc: + logging.warn(exc) + raise Exception('VBD not found in instance %s' % vm_ref) + + @classmethod + @defer.inlineCallbacks + def unplug_vbd(self, session, vbd_ref): + try: + vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) + except Exception, exc: + logging.warn(exc) + if exc.details[0] != 'DEVICE_ALREADY_DETACHED': + raise Exception('Unable to unplug VBD %s' % vbd_ref) + + @classmethod + @defer.inlineCallbacks + def destroy_vbd(self, session, vbd_ref): + try: + task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) + yield session.wait_for_task(task) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to destroy VBD %s' % vbd_ref) + @classmethod @defer.inlineCallbacks def create_vif(self, session, vm_ref, network_ref, mac_address): diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py new file mode 100644 index 000000000..b982ac124 --- /dev/null +++ b/nova/virt/xenapi/volume_utils.py @@ -0,0 +1,210 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Helper methods for operations related to the management of volumes, +and storage repositories +""" + +import logging +import re +import string + +from twisted.internet import defer + +from nova import utils +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + + +class VolumeHelper(): + def __init__(self, session): + return + + @classmethod + @utils.deferredToThread + def create_iscsi_storage(self, session, target, port, target_iqn, + username, password, label, description): + + return VolumeHelper.create_iscsi_storage_blocking(session, target, + port, + target_iqn, + username, + password, + label, + description) + + @classmethod + def create_iscsi_storage_blocking(self, session, target, port, target_iqn, + username, password, label, description): + + sr_ref = session.get_xenapi().SR.get_by_name_label(label) + if len(sr_ref) == 0: + logging.debug('Introducing %s...' % label) + try: + sr_ref = session.get_xenapi().SR.create( + session.get_xenapi_host(), + {'target': target, + 'port': port, + 'targetIQN': target_iqn + # TODO: when/if chap authentication is used + #'chapuser': username, + #'chappassword': password + }, + '0', label, description, 'iscsi', '', False, {}) + logging.debug('Introduced %s as %s.' % (label, sr_ref)) + return sr_ref + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to create Storage Repository') + else: + return sr_ref[0] + + @classmethod + @defer.inlineCallbacks + def find_sr_from_vbd(self, session, vbd_ref): + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + + @classmethod + @utils.deferredToThread + def destroy_iscsi_storage(self, session, sr_ref): + VolumeHelper.destroy_iscsi_storage_blocking(session, sr_ref) + + @classmethod + def destroy_iscsi_storage_blocking(self, session, sr_ref): + logging.debug("Forgetting SR %s ... 
", sr_ref) + pbds = [] + try: + pbds = session.get_xenapi().SR.get_PBDs(sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when getting PBDs for %s', + exc, sr_ref) + for pbd in pbds: + try: + session.get_xenapi().PBD.unplug(pbd) + except Exception, exc: + logging.warn('Ignoring exception %s when unplugging PBD %s', + exc, pbd) + try: + session.get_xenapi().SR.forget(sr_ref) + logging.debug("Forgetting SR %s done.", sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when forgetting SR %s', + exc, sr_ref) + + @classmethod + @utils.deferredToThread + def introduce_vdi(self, session, sr_ref): + return VolumeHelper.introduce_vdi_blocking(session, sr_ref) + + @classmethod + def introduce_vdi_blocking(self, session, sr_ref): + try: + vdis = session.get_xenapi().SR.get_VDIs(sr_ref) + except Exception, exc: + raise Exception('Unable to introduce VDI on SR %s' % sr_ref) + try: + vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) + except Exception, exc: + raise Exception('Unable to get record of VDI %s on' % vdis[0]) + else: + return session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + volume_id = _get_volume_id(device_path) + target_host = _get_target_host(device_path) + target_port = _get_target_port(device_path) + target_iqn = _get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + +def _get_volume_id(n): + # FIXME: n must contain at least the volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + +def _get_target_host(n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + +def _get_target_port(n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + +def _get_iqn(n): + # FIXME: n must contain at least the volume_id + volume_id = _get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5aefa0611..d5c309240 100644 --- 
a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,6 +17,12 @@ """ Management class for Storage-related functions (attach, detach, etc). """ +import logging + +from twisted.internet import defer + +from volume_utils import VolumeHelper +from vm_utils import VMHelper class VolumeOps(object): @@ -25,58 +31,89 @@ class VolumeOps(object): @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): + # Before we start, check that the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - volume_info = _parse_volume_info(device_path, mountpoint) + vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID - target = yield self._get_target(volume_info) - label = 'SR-%s' % volume_info['volumeId'] - description = 'Attached-to:%s' % instance_name - # Create SR and check the physical space available for the VDI allocation - sr_ref = yield self._create_sr(target, label, description) - disk_size = int(target['size']) - #disk_size = yield self._get_sr_available_space(sr_ref) - # Create VDI and attach VBD to VM - vm_ref = yield self._lookup(instance_name) - logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) + label = 'SR-%s' % vol_rec['volumeId'] + description = 'Disk-for:%s' % instance_name + # Create SR + sr_ref = yield VolumeHelper.create_iscsi_storage(self._session, + vol_rec['targetHost'], + vol_rec['targetPort'], + vol_rec['targeIQN'], + '', # no CHAP auth + '', + label, + description) + # Introduce VDI and attach VBD to VM try: - vdi_ref = yield self._create_vdi(sr_ref, disk_size, - 'user', volume_info['volumeId'], '', - False, False) + vdi_ref = yield VolumeHelper.introduce_vdi(self._session, sr_ref) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VDI on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - userdevice = 2 # FIXME: this depends on the numbers of attached disks - vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + vbd_ref = yield VMHelper.create_vbd(self._session, + vm_ref, vdi_ref, + vol_rec['deviceNumber'], + False) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VBD on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - raise Exception('') - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) + #raise Exception('') + task = yield self._session.call_xenapi('Async.VBD.plug', + vbd_ref) + yield self._session.wait_for_task(task) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) - raise Exception('Unable to attach volume to instance %s' % instance_name) - + yield VolumeHelper.destroy_iscsi_storage(self._session, + sr_ref) + raise Exception('Unable to attach volume to instance %s' % + instance_name) yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Before we start, check that 
the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM - # Forget SR/PDB info associated with host - # TODO: can we avoid destroying the SR every time we detach? - yield True \ No newline at end of file + logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + try: + vbd_ref = yield VMHelper.find_vbd_by_number(self._session, + vm_ref, device_number) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to locate volume %s' % mountpoint) + else: + try: + sr_ref = yield VolumeHelper.find_sr_from_vbd(self._session, + vbd_ref) + yield VMHelper.unplug_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to detach volume %s' % mountpoint) + try: + yield VMHelper.destroy_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + # Forget SR + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) + yield True -- cgit From f26489ef1ad2a7df0e9f72a8c9ad4f2e3a65ae57 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 1 Dec 2010 12:02:02 +0000 Subject: minor refactoring --- nova/virt/xenapi/novadeps.py | 82 ++++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volume_utils.py | 77 ------------------------------------- nova/virt/xenapi/volumeops.py | 6 ++- 3 files changed, 86 insertions(+), 79 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index ba62468fb..db51982a6 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -14,6 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. 
+import re +import string + from nova import db from nova import flags from nova import process @@ -32,6 +35,15 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + class Instance(object): @@ -101,3 +113,73 @@ class User(object): @classmethod def get_secret(self, user): return user.secret + + +class Volume(object): + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = Volume.mountpoint_to_number(mountpoint) + volume_id = Volume.get_volume_id(device_path) + target_host = Volume.get_target_host(device_path) + target_port = Volume.get_target_port(device_path) + target_iqn = Volume.get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + @classmethod + def get_volume_id(self, n): + # FIXME: n must contain at least the volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + @classmethod + def get_target_host(self, n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + @classmethod + def get_target_port(self, n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + @classmethod + def get_iqn(self, n): + # FIXME: n must contain at least the volume_id + volume_id = Volume.get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index b982ac124..3b3e8894c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -20,20 +20,10 @@ and storage repositories """ import logging -import re -import string from twisted.internet import defer from nova import utils -from nova import flags - -FLAGS = flags.FLAGS - -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') -flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') class VolumeHelper(): @@ -141,70 +131,3 @@ class VolumeHelper(): 
vdi_rec['location'], vdi_rec['xenstore_data'], vdi_rec['sm_config']) - - @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint - device_number = VolumeHelper.mountpoint_to_number(mountpoint) - volume_id = _get_volume_id(device_path) - target_host = _get_target_host(device_path) - target_port = _get_target_port(device_path) - target_iqn = _get_iqn(device_path) - - if (device_number < 0) or \ - (volume_id is None) or \ - (target_host is None) or \ - (target_iqn is None): - raise Exception('Unable to obtain target information %s, %s' % - (device_path, mountpoint)) - - volume_info = {} - volume_info['deviceNumber'] = device_number - volume_info['volumeId'] = volume_id - volume_info['targetHost'] = target_host - volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn - return volume_info - - @classmethod - def mountpoint_to_number(self, mountpoint): - if mountpoint.startswith('/dev/'): - mountpoint = mountpoint[5:] - if re.match('^[hs]d[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^vd[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^[0-9]+$', mountpoint): - return string.atoi(mountpoint, 10) - else: - logging.warn('Mountpoint cannot be translated: %s', mountpoint) - return -1 - - -def _get_volume_id(n): - # FIXME: n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes - # see compute/manager->setup_compute_volume - volume_id = n[n.find('/vol-') + 1:] - if volume_id == n: - volume_id = n[n.find('-vol-') + 1:].replace('--', '-') - return volume_id - - -def _get_target_host(n): - # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host - - -def _get_target_port(n): - # FIXME: if n is none fall back on flags - return FLAGS.target_port - - -def _get_iqn(n): - # FIXME: n must contain at least the volume_id - volume_id = _get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index d5c309240..ec4343329 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -24,6 +24,8 @@ from twisted.internet import defer from volume_utils import VolumeHelper from vm_utils import VMHelper +from novadeps import Volume + class VolumeOps(object): def __init__(self, session): @@ -38,9 +40,9 @@ class VolumeOps(object): # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. 
# But first, retrieve target info, like Host, IQN, LUN and SCSIID + vol_rec = Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR @@ -95,7 +97,7 @@ class VolumeOps(object): raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) - device_number = VolumeHelper.mountpoint_to_number(mountpoint) + device_number = Volume.mountpoint_to_number(mountpoint) try: vbd_ref = yield VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) -- cgit From e4cfd7f3fe7d3c50d65c61abf21bf998fde85147 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 2 Dec 2010 14:09:23 +0000 Subject: minor refactoring after merge --- nova/virt/xenapi/novadeps.py | 28 ++++++++++++++++++++-------- nova/virt/xenapi_conn.py | 5 ++++- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index b4802764e..aa3535162 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -36,8 +36,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} -FLAGS = flags.FLAGS - flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' @@ -81,6 +79,21 @@ class Configuration(object): def xenapi_task_poll_interval(self): return self._flags.xenapi_task_poll_interval + @property + def target_host(self): + return self._flags.target_host + + @property + def target_port(self): + return self._flags.target_port + + @property + def iqn_prefix(self): + return self._flags.iqn_prefix + + +config = Configuration() + class Instance(object): @@ -206,18 +219,17 @@ class Volume(object): @classmethod def get_target_host(self, n): # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host + if n is None or config.target_host: + return config.target_host @classmethod def get_target_port(self, n): # FIXME: if n is none fall back on flags - return FLAGS.target_port + return config.target_port @classmethod def get_iqn(self, n): # FIXME: n must contain at least the volume_id volume_id = Volume.get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % (FLAGS.iqn_prefix, volume_id) - + if n is None or config.iqn_prefix: + return '%s:%s' % (config.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 948fade7e..d3f66b12c 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -44,7 +44,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :xenapi_task_poll_interval: The interval (seconds) used for polling of remote tasks (Async.VM.start, etc) (default: 0.5). - +:target_host: the iSCSI Target Host IP address, i.e. the IP + address for the nova-volume host +:target_port: iSCSI Target Port, 3260 Default +:iqn_prefix: IQN Prefix, e.g. 
'iqn.2010-10.org.openstack' """ import logging -- cgit From da010f311c07ee31d7d00ceb48d0f8656f1825ca Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 3 Dec 2010 00:01:21 +0000 Subject: * Removes unused schema * Removes MUST uid from novaUser * Changes isAdmin to isNovaAdmin * Adds two new configuration options: ** ldap_user_id_attribute, with a default of uid ** ldap_user_name_attribute, with a default of cn * ldapdriver.py has been modified to use these changes Rationale: Removing uid from novaUser: Requiring uid makes the schema very posix specific. Other schemas don't use uid for identifiers at all. This change makes the schema more interoperable. Changing isAdmin to isNovaAdmin: This attribute is too generic. It doesn't describe what the user is an admin of, and in a pre-existing directory is out of place. This change is to make the attribute more specific to the software. Adding config options for id and name: This is another interoperability change. This change makes the driver more compatible with directories like AD, where sAMAccountName is used instead of uid. Also, some directory admins prefer to use displayName rather than CN for full names of users. --- nova/auth/ldapdriver.py | 21 ++++++++++++--------- nova/auth/nova_openldap.schema | 26 +++----------------------- nova/auth/nova_sun.schema | 6 ++---- nova/auth/openssh-lpk_openldap.schema | 19 ------------------- nova/auth/openssh-lpk_sun.schema | 10 ---------- 5 files changed, 17 insertions(+), 65 deletions(-) delete mode 100644 nova/auth/openssh-lpk_openldap.schema delete mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ceade1d65..e4c36c28d 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -37,6 +37,8 @@ flags.DEFINE_string('ldap_url', 'ldap://localhost', flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') +flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -131,12 +133,12 @@ class LdapDriver(object): 'inetOrgPerson', 'novaUser']), ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), + (FLAGS.ldap_user_id_attribute, [name]), ('sn', [name]), - ('cn', [name]), + (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), + ('isNovaAdmin', [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -274,7 +276,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -450,11 +452,11 @@ class LdapDriver(object): if attr == None: return None return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], + 'id': attr[FLAGS.ldap_user_id_attribute][0], + 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} + 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} def 
__to_project(self, attr): """Convert ldap attributes to Project object""" @@ -474,9 +476,10 @@ class LdapDriver(object): return dn.split(',')[0].split('=')[1] @staticmethod - def __uid_to_dn(dn): + def __uid_to_dn(uid): """Convert uid to dn""" - return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) + return FLAGS.ldap_user_id_attribute + '=%s,%s' \ + % (uid, FLAGS.ldap_user_subtree) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 4047361de..9e528f58b 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -30,20 +30,10 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - attributetype ( novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' + NAME 'isNovaAdmin' + DESC 'Is user an nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE @@ -61,17 +51,7 @@ objectClass ( NAME 'novaUser' DESC 'access and secret keys' AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) + MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClass ( diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index e925e05e4..decf10f06 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -8,9 +8,7 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' 
EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema deleted file mode 100644 index 93351da6d..000000000 --- a/nova/auth/openssh-lpk_openldap.schema +++ /dev/null @@ -1,19 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema deleted file mode 100644 index 5f52db3b6..000000000 --- a/nova/auth/openssh-lpk_sun.schema +++ /dev/null @@ -1,10 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Schema for Sun Directory Server. -# Based on the original schema, modified by Stefan Fischer. -# -dn: cn=schema -attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) -objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) -- cgit From ee71c0accbb540bcb9d08cdcdc8b659f29a0edd6 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:06:32 +0000 Subject: added interim solution for target discovery. Now info can either be passed via flags or discovered via iscsiadm. 
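As a concrete illustration of the interim discovery path described above: iscsiadm -m discovery -t sendtargets -p <host> prints one line per portal in the form IP:PORT,TPGT IQN, and the Volume.get_target helper added below picks the line containing the volume id and splits it into (iscsi_name, iscsi_portal). A small runnable sketch of that string handling follows; the sample discovery line is assembled from the address and IQN used elsewhere in this series and is purely illustrative.

# Illustrative sketch, not part of the patch: mirrors the string handling
# in Volume.get_target below. The sample discovery line is made up from
# values that appear elsewhere in this series.
SAMPLE_DISCOVERY_OUTPUT = (
    "10.70.177.40:3260,1 iqn.2010-10.org.openstack:vol-qurmrzn9\n")


def parse_discovery(output, volume_id):
    """Return (iscsi_name, iscsi_portal) for the line matching volume_id."""
    for line in output.splitlines():
        if volume_id in line:
            (location, _sep, iscsi_name) = line.partition(" ")
            iscsi_portal = location.split(",")[0]  # drop the ',TPGT' suffix
            return (iscsi_name, iscsi_portal)
    return (None, None)

print(parse_discovery(SAMPLE_DISCOVERY_OUTPUT, 'vol-qurmrzn9'))
# ('iqn.2010-10.org.openstack:vol-qurmrzn9', '10.70.177.40:3260')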
Long term solution is to add a few more fields to the db in the iscsi_target table with the necessary info and modify the iscsi driver to set them --- nova/virt/xenapi/novadeps.py | 89 ++++++++++++++++++++++++++++++++++--------- nova/virt/xenapi/volumeops.py | 2 +- 2 files changed, 71 insertions(+), 20 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 65576019e..66c8233b8 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -27,6 +27,9 @@ import string from nova import db from nova import flags from nova import context +from nova import process + +from twisted.internet import defer from nova.compute import power_state from nova.auth.manager import AuthManager @@ -193,15 +196,28 @@ class User(object): class Volume(object): + """ Wraps up volume specifics """ @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint + @defer.inlineCallbacks + def parse_volume_info(cls, device_path, mountpoint): + """ + Parse device_path and mountpoint as they can be used by XenAPI. + In particular, the mountpoint (e.g. /dev/sdc) must be translated + into a numeric literal. + FIXME: As for device_path, currently cannot be used as it is, + because it does not contain target information. As for interim + solution, target details are passed either via Flags or obtained + by iscsiadm. Long-term solution is to add a few more fields to the + db in the iscsi_target table with the necessary info and modify + the iscsi driver to set them. + """ device_number = Volume.mountpoint_to_number(mountpoint) volume_id = Volume.get_volume_id(device_path) - target_host = Volume.get_target_host(device_path) - target_port = Volume.get_target_port(device_path) - target_iqn = Volume.get_iqn(device_path) + (iscsi_name, iscsi_portal) = yield Volume.get_target(volume_id) + target_host = Volume.get_target_host(iscsi_portal) + target_port = Volume.get_target_port(iscsi_portal) + target_iqn = Volume.get_iqn(iscsi_name, volume_id) if (device_number < 0) or \ (volume_id is None) or \ @@ -216,10 +232,11 @@ class Volume(object): volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port volume_info['targeIQN'] = target_iqn - return volume_info + defer.returnValue(volume_info) @classmethod - def mountpoint_to_number(self, mountpoint): + def mountpoint_to_number(cls, mountpoint): + """ Translate a mountpoint like /dev/sdc into a numberic """ if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] if re.match('^[hs]d[a-p]$', mountpoint): @@ -233,8 +250,9 @@ class Volume(object): return -1 @classmethod - def get_volume_id(self, n): - # FIXME: n must contain at least the volume_id + def get_volume_id(cls, n): + """ Retrieve the volume id from device_path """ + # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes # see compute/manager->setup_compute_volume @@ -244,19 +262,52 @@ class Volume(object): return volume_id @classmethod - def get_target_host(self, n): - # FIXME: if n is none fall back on flags - if n is None or config.target_host: + def get_target_host(cls, n): + """ Retrieve target host """ + if n: + return n[0:n.find(':')] + elif n is None or config.target_host: return config.target_host @classmethod - def get_target_port(self, n): - # FIXME: if n is none fall back on flags - return config.target_port + def get_target_port(cls, n): + """ Retrieve target port """ + if n: + return n[n.find(':') + 1:] + elif n is None or 
config.target_port: + return config.target_port @classmethod - def get_iqn(self, n): - # FIXME: n must contain at least the volume_id - volume_id = Volume.get_volume_id(n) - if n is None or config.iqn_prefix: + def get_iqn(cls, n, id): + """ Retrieve target IQN """ + if n: + return n + elif n is None or config.iqn_prefix: + volume_id = Volume.get_volume_id(id) return '%s:%s' % (config.iqn_prefix, volume_id) + + @classmethod + @defer.inlineCallbacks + def get_target(self, volume_id): + """ + Gets iscsi name and portal from volume name and host. + For this method to work the following are needed: + 1) volume_ref['host'] to resolve the public IP address + 2) ietd to listen only to the public network interface + If any of the two are missing, fall back on Flags + """ + volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), + volume_id) + + (r, _e) = yield process.simple_execute("sudo iscsiadm -m discovery -t " + "sendtargets -p %s" % + volume_ref['host']) + if len(_e) == 0: + for target in r.splitlines(): + if volume_id in target: + (location, _sep, iscsi_name) = target.partition(" ") + break + iscsi_portal = location.split(",")[0] + defer.returnValue((iscsi_name, iscsi_portal)) + else: + defer.returnValue((None, None)) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c48f6491..a052aaf95 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -47,7 +47,7 @@ class VolumeOps(object): instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID - vol_rec = Volume.parse_volume_info(device_path, mountpoint) + vol_rec = yield Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR -- cgit From 06c52051005f5e43a1f543e2d1c5922aa91c7918 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:29:00 +0000 Subject: minor changes to docstrings --- nova/virt/xenapi/novadeps.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 66c8233b8..3680a5dd2 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -60,10 +60,15 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') -flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') +flags.DEFINE_string('target_host', + None, + 'iSCSI Target Host') +flags.DEFINE_string('target_port', + '3260', + 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', + 'iqn.2010-10.org.openstack', + 'IQN Prefix') class Configuration(object): @@ -292,9 +297,9 @@ class Volume(object): """ Gets iscsi name and portal from volume name and host. For this method to work the following are needed: - 1) volume_ref['host'] to resolve the public IP address - 2) ietd to listen only to the public network interface - If any of the two are missing, fall back on Flags + 1) volume_ref['host'] must resolve to something rather than loopback + 2) ietd must bind only to the address as resolved above + If any of the two conditions are not met, fall back on Flags. 
""" volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), volume_id) -- cgit From e9597d1370211de15ca96f1fa52fcbe3c9166a7e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:15:22 +0000 Subject: fixed pylint violations that slipped out from a previous check --- nova/virt/xenapi/vm_utils.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 762cbae83..2ee6737ab 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.virt import images from nova.compute import power_state +from nova.virt.xenapi.volume_utils import StorageError XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -115,11 +116,13 @@ class VMHelper(): @classmethod @utils.deferredToThread - def find_vbd_by_number(self, session, vm_ref, number): + def find_vbd_by_number(cls, session, vm_ref, number): + """ Get the VBD reference from the device number """ return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) @classmethod - def find_vbd_by_number_blocking(self, session, vm_ref, number): + def find_vbd_by_number_blocking(cls, session, vm_ref, number): + """ Synchronous find_vbd_by_number """ vbds = session.get_xenapi().VM.get_VBDs(vm_ref) if vbds: for vbd in vbds: @@ -127,29 +130,31 @@ class VMHelper(): vbd_rec = session.get_xenapi().VBD.get_record(vbd) if vbd_rec['userdevice'] == str(number): return vbd - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) raise Exception('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks - def unplug_vbd(self, session, vbd_ref): + def unplug_vbd(cls, session, vbd_ref): + """ Unplug VBD from VM """ try: vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': - raise Exception('Unable to unplug VBD %s' % vbd_ref) + raise StorageError('Unable to unplug VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks - def destroy_vbd(self, session, vbd_ref): + def destroy_vbd(cls, session, vbd_ref): + """ Destroy VBD from host database """ try: task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) yield session.wait_for_task(task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('Unable to destroy VBD %s' % vbd_ref) + raise StorageError('Unable to destroy VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks @@ -244,6 +249,7 @@ class VMHelper(): @classmethod def compile_info(cls, record): + """ Fill record with VM status information """ return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, -- cgit From 88777c09ad909c68da8d433800cae862e9bbff4a Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:26:38 +0000 Subject: and yet another pylint fix --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2ee6737ab..039e72981 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD 
not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks -- cgit From c0fc8a5e9e72ecb780258d9cf41b32973620eb4c Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 15:35:56 +0000 Subject: small fixes on Exception handling --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 10 +++++++--- nova/virt/xenapi/volumeops.py | 7 ++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 039e72981..f29803136 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise StorageError('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 48aff7ef5..debaa6906 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -95,9 +95,13 @@ class VolumeHelper(): @defer.inlineCallbacks def find_sr_from_vbd(cls, session, vbd_ref): """ Find the SR reference from the VBD reference """ - vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) - sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) - defer.returnValue(sr_ref) + try: + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + except XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to find SR from VBD %s' % vbd_ref) @classmethod @utils.deferredToThread diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 4055688e3..b9f260756 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -18,6 +18,7 @@ Management class for Storage-related functions (attach, detach, etc). """ import logging +import XenAPI from twisted.internet import defer @@ -68,10 +69,10 @@ class VolumeOps(object): vm_ref, vdi_ref, vol_rec['deviceNumber'], False) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - raise StorageError('Unable to use SR %s for instance %s' + raise Exception('Unable to use SR %s for instance %s' % (sr_ref, instance_name)) else: @@ -79,7 +80,7 @@ class VolumeOps(object): task = yield self._session.call_xenapi('Async.VBD.plug', vbd_ref) yield self._session.wait_for_task(task) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) -- cgit From 699ac0785240307ef2396d688e6c0a2acb446665 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 22:22:48 +0000 Subject: pylint fixes --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 35d89d835..0549dc9fb 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -44,7 +44,7 @@ class VMHelper(): """ The class that wraps the helper methods together. 
""" - def __init__(self, session): + def __init__(self): return @classmethod diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 84eb82f15..051d0fe85 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -1,5 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from orca.scripts import self_voicing # Copyright (c) 2010 Citrix Systems, Inc. # @@ -46,7 +45,7 @@ class VolumeHelper(): """ The class that wraps the helper methods together. """ - def __init__(self, session): + def __init__(self): return @classmethod -- cgit From 03920759ac485e76c9104b4c9a1bf53231e2c47c Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 10:22:29 +0000 Subject: Removing novaProject from the schema. This change may look odd at first; here's how it works: Both roles are projects are groupOfNames. Previously, we were differentiating projects from project roles by using the novaProject objectclass on the project, and not on the roles. This change removes novaProject, and uses the owner attribute instead of the projectManager attribute. Only projects should have an owner. We can differentiate projects from project roles by checking for the existence of this attribute. To check for the existence of an attribute in LDAP, a wildcard search is used. The fake LDAP driver did not support wildcard searches, so I put in "all or nothing" support for it. The wildcard search support doesn't work exactly like wildcard searches in LDAP, but will work for the case that's required. --- nova/auth/fakeldap.py | 3 +++ nova/auth/ldapdriver.py | 16 ++++++++-------- nova/auth/nova_openldap.schema | 16 ---------------- nova/auth/nova_sun.schema | 2 -- nova/auth/opendj.sh | 2 -- nova/auth/slap.sh | 4 +--- 6 files changed, 12 insertions(+), 31 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b4..2dcb69267 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -119,6 +119,9 @@ def _match(key, value, attrs): """Match a given key and value against an attribute list.""" if key not in attrs: return False + # This is a wild card search. Implemented as all or nothing for now. 
+ if value == "*": + return True if key != "objectclass": return value in attrs[key] # it is an objectclass check, so check subclasses diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 871515663..705e89ee8 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -106,7 +106,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(objectclass=novaProject)') + attr = self.__find_object(dn, '(owner=*)') return self.__to_project(attr) def get_users(self): @@ -122,7 +122,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(objectclass=novaProject)' + pattern = '(owner=*)' if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -205,10 +205,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['novaProject']), + ('objectclass', ['groupOfNames']), ('cn', [name]), ('description', [description]), - ('projectManager', [manager_dn]), + ('owner', [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +224,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -286,7 +286,7 @@ class LdapDriver(object): project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) roles = self.__find_objects(project_dn, '(&(&(objectclass=groupOfNames)' - '(!(objectclass=novaProject)))' + '(!(owner=*)))' '(member=%s))' % self.__uid_to_dn(uid)) return [role['cn'][0] for role in roles] @@ -385,7 +385,7 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))') + '(&(objectclass=groupOfNames)(!(owner=*)))') def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" @@ -534,7 +534,7 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]), + 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 9e528f58b..1a10a445d 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -39,13 +39,6 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - objectClass ( novaOCs:1 NAME 'novaUser' @@ -53,12 +46,3 @@ objectClass ( AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index decf10f06..1a04601b5 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -9,6 
+9,4 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 8052c077d..9a9600342 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,9 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' -cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif -chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif cat >/etc/ldap/ldap.conf </dev/null; echo "$PWD"/"${0##*/}")"` -cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema -cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema +cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig cat >/etc/ldap/slapd.conf </etc/ldap/slapd.conf < Date: Wed, 8 Dec 2010 16:23:59 +0000 Subject: Adding support for choosing a schema version, so that users can more easily migrate from an old schema to the new schema. --- nova/auth/ldapdriver.py | 79 ++++++++++++++++++++++++------------------ nova/auth/nova_openldap.schema | 4 ++- nova/auth/nova_sun.schema | 5 +-- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 705e89ee8..21d8f8065 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,6 +32,8 @@ from nova import flags FLAGS = flags.FLAGS +flags.DEFINE_integer('ldap_schema_version', 1, + 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') @@ -75,10 +77,20 @@ class LdapDriver(object): Defines enter and exit and therefore supports the with/as syntax. 
""" + project_pattern = '(owner=*)' + isadmin_attribute = 'isNovaAdmin' + project_attribute = 'owner' + project_objectclass = 'groupOfNames' + def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') self.conn = None + if FLAGS.ldap_schema_version == 1: + LdapDriver.project_pattern = '(objectclass=novaProject)' + LdapDriver.isadmin_attribute = 'isAdmin' + LdapDriver.project_attribute = 'projectManager' + LdapDriver.project_objectclass = 'novaProject' def __enter__(self): """Creates the connection to LDAP""" @@ -106,7 +118,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(owner=*)') + attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) def get_users(self): @@ -122,7 +134,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(owner=*)' + pattern = LdapDriver.project_pattern if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -152,11 +164,11 @@ class LdapDriver(object): else: attr.append((self.ldap.MOD_ADD, 'accessKey', \ [access_key])) - if 'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', \ + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, 'isNovaAdmin', \ + attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) @@ -175,7 +187,7 @@ class LdapDriver(object): (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isNovaAdmin', [str(is_admin).upper()]), + (LdapDriver.isadmin_attribute, [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -205,10 +217,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['groupOfNames']), + ('objectclass', [LdapDriver.project_objectclass]), ('cn', [name]), ('description', [description]), - ('owner', [manager_dn]), + (LdapDriver.project_attribute, [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +236,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -284,10 +296,9 @@ class LdapDriver(object): return roles else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) - roles = self.__find_objects(project_dn, - '(&(&(objectclass=groupOfNames)' - '(!(owner=*)))' - '(member=%s))' % self.__uid_to_dn(uid)) + query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] def delete_user(self, uid): @@ -306,9 +317,9 @@ class LdapDriver(object): if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_DELETE, 'accessKey', \ user['accessKey'])) - if 
'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'isNovaAdmin', \ - user['isNovaAdmin'])) + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -330,7 +341,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -384,19 +395,20 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(owner=*)))') + query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" - dns = self.__find_dns(tree, - '(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + query = ('(&(objectclass=groupOfNames)(member=%s))' % + self.__uid_to_dn(uid)) + dns = self.__find_dns(tree, query) return dns def __group_exists(self, dn): """Check if group exists""" - return self.__find_object(dn, '(objectclass=groupOfNames)') is not None + query = '(objectclass=groupOfNames)' + return self.__find_object(dn, query) is not None @staticmethod def __role_to_dn(role, project_id=None): @@ -435,7 +447,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -447,10 +459,10 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if self.__is_in_group(uid, group_dn): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) @@ -461,13 +473,13 @@ class LdapDriver(object): """Remove user from group""" if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + "group because the user doesn't exist" % uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -489,7 +501,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -516,13 
+528,13 @@ class LdapDriver(object): if attr is None: return None if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \ - and 'isNovaAdmin' in attr.keys()): + and LdapDriver.isadmin_attribute in attr.keys()): return { 'id': attr[FLAGS.ldap_user_id_attribute][0], 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} + 'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')} else: return None @@ -534,7 +546,8 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), + 'project_manager_id': + self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 1a10a445d..daa3a8442 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -1,7 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 1a04601b5..8e9052ded 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -1,8 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya -# Modified for strict RFC 4512 compatibility by: Ryan Lane +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # using internet experimental oid arc as per BP64 3.1 dn: cn=schema -- cgit From 5e79e5957a016c1f38fb2d126f710078a4b7f9a2 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:26:12 +0000 Subject: Setting the default schema version to the new schema --- nova/auth/ldapdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 21d8f8065..eac1db547 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,7 +32,7 @@ from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_integer('ldap_schema_version', 1, +flags.DEFINE_integer('ldap_schema_version', 2, 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') -- cgit From 55bc83b07abc8700c2b619be6be88b348f42a4d8 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:38:35 +0000 Subject: PEP8 fixes --- nova/auth/ldapdriver.py | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index eac1db547..870262a15 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,7 +40,8 @@ flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') -flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', + 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -153,23 +154,23 @@ class LdapDriver(object): # Malformed entries are useless, replace attributes found. 
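# Illustrative aside, not part of the patch above: the modify path being
# re-indented here builds a python-ldap "modlist" of (operation, attribute,
# values) tuples and hands it to modify_s(). A minimal example of that shape;
# the DN and key values below are made up for illustration:
import ldap

modlist = [(ldap.MOD_REPLACE, 'secretKey', ['new-secret-key']),
           (ldap.MOD_ADD, 'accessKey', ['new-access-key'])]
# With a live connection this would be applied as:
#     conn.modify_s('uid=someuser,ou=Users,dc=example,dc=com', modlist)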
attr = [] if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'secretKey', \ + attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) else: - attr.append((self.ldap.MOD_ADD, 'secretKey', \ + attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'accessKey', \ + attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) else: - attr.append((self.ldap.MOD_ADD, 'accessKey', \ + attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_REPLACE, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_ADD, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -236,7 +237,8 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, + manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -312,14 +314,15 @@ class LdapDriver(object): # Retrieve user by name user = self.__get_ldap_user(uid) if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'secretKey', \ - user['secretKey'])) + attr.append((self.ldap.MOD_DELETE, 'secretKey', + user['secretKey'])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'accessKey', \ - user['accessKey'])) + attr.append((self.ldap.MOD_DELETE, 'accessKey', + user['accessKey'])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ - user[LdapDriver.isadmin_attribute])) + attr.append((self.ldap.MOD_DELETE, + LdapDriver.isadmin_attribute, + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -341,7 +344,8 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, + str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -395,7 +399,8 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + query = ('(&(objectclass=groupOfNames)(!%s))' % + LdapDriver.project_pattern) return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): -- cgit From 63006a18701ff185e6837aa2b88f001052643460 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 8 Dec 2010 18:49:28 +0000 Subject: typo fix --- nova/virt/xenapi/volume_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 051d0fe85..d247066aa 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -78,14 +78,14 @@ class VolumeHelper(): if 'chapuser' in info 
and 'chappassword' in info: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'], + 'targetIQN': info['targetIQN'], 'chapuser': info['chapuser'], 'chappassword': info['chappassword'] } else: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'] + 'targetIQN': info['targetIQN'] } try: sr_ref = session.get_xenapi().SR.create( @@ -211,7 +211,7 @@ class VolumeHelper(): volume_info['volumeId'] = volume_id volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn + volume_info['targetIQN'] = target_iqn defer.returnValue(volume_info) @classmethod -- cgit From 2a5ad56319dfdf75bf2eab1337032f035822f272 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Fri, 10 Dec 2010 18:37:17 +0000 Subject: There is always the odd change that one forgets! --- nova/virt/xenapi_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index b82862764..4ace6da14 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -65,7 +65,7 @@ from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS flags.DEFINE_boolean('xenapi_use_fake_session', - True, + False, 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, -- cgit From 8d08206cb4759328e7cf3b836eeff824e0d22052 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 10 Dec 2010 18:49:54 +0000 Subject: Format fixes and modification of Vish's email address. --- nova/auth/ldapdriver.py | 41 ++++++++++++++++++++++------------------- nova/auth/nova_openldap.schema | 2 +- nova/auth/nova_sun.schema | 2 +- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 870262a15..1b928e7d8 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,7 @@ class LdapDriver(object): def get_users(self): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, - '(objectclass=novaUser)') + '(objectclass=novaUser)') users = [] for attr in attrs: user = self.__to_user(attr) @@ -155,22 +155,24 @@ class LdapDriver(object): attr = [] if 'secretKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'secretKey', - [secret_key])) + [secret_key])) else: attr.append((self.ldap.MOD_ADD, 'secretKey', - [secret_key])) + [secret_key])) if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'accessKey', - [access_key])) + [access_key])) else: attr.append((self.ldap.MOD_ADD, 'accessKey', - [access_key])) + [access_key])) if LdapDriver.isadmin_attribute in user.keys(): attr.append((self.ldap.MOD_REPLACE, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) else: attr.append((self.ldap.MOD_ADD, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -299,7 +301,7 @@ class LdapDriver(object): else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % - (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] @@ -363,7 +365,7 @@ class LdapDriver(object): def 
__get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') + '(objectclass=novaUser)') return attr def __find_object(self, dn, query=None, scope=None): @@ -406,7 +408,7 @@ class LdapDriver(object): def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" query = ('(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + self.__uid_to_dn(uid)) dns = self.__find_dns(tree, query) return dns @@ -436,7 +438,8 @@ class LdapDriver(object): for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + "because user %s doesn't exist" % + member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -452,7 +455,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -464,7 +467,7 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % group_dn) @@ -481,13 +484,13 @@ class LdapDriver(object): group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % uid) + "group because the user doesn't exist" % + uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % uid) # NOTE(vish): remove user from group and any sub_groups - sub_dns = self.__find_group_dns_with_member( - group_dn, uid) + sub_dns = self.__find_group_dns_with_member(group_dn, uid) for sub_dn in sub_dns: self.__safe_remove_from_group(uid, sub_dn) @@ -506,7 +509,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -564,8 +567,8 @@ class LdapDriver(object): @staticmethod def __uid_to_dn(uid): """Convert uid to dn""" - return FLAGS.ldap_user_id_attribute + '=%s,%s' \ - % (uid, FLAGS.ldap_user_subtree) + return (FLAGS.ldap_user_id_attribute + '=%s,%s' + % (uid, FLAGS.ldap_user_subtree)) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index daa3a8442..539a5c42d 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 8e9052ded..4a6a78839 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: 
Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # using internet experimental oid arc as per BP64 3.1 -- cgit From 669d5f5612840c9ed6449d91ee5aae97842cac72 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:22:56 +0000 Subject: second round for unit testing framework --- nova/tests/virt_unittest.py | 65 +++++++++++++++++++++++++++----------- nova/virt/xenapi/__init__.py | 5 +++ nova/virt/xenapi/fake.py | 75 ++++++++++++++++++++++++++++++++++++++++++-- nova/virt/xenapi/vmops.py | 3 +- nova/virt/xenapi_conn.py | 2 +- 5 files changed, 127 insertions(+), 23 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index ba3fba83b..611022632 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -30,6 +30,9 @@ from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +# Those are XenAPI related +flags.DECLARE('target_host', 'nova.virt.xenapi_conn') +FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -270,36 +273,62 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.helper = volume_utils.VolumeHelper self.helper.late_import() + def _create_volume(self, size='0'): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['host'] = 'localhost' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(context.get_admin_context(), vol) + def test_create_iscsi_storage_raise_no_exception(self): - info = self.helper.parse_volume_info(None, None) - label = 'SR-' - description = '' - self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) + vol = self._create_volume() + info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs + description = 'Test-SR' + self.session.fail_next_call = False + sr_ref = self.helper.create_iscsi_storage_blocking(self.session, + info, + label, + description) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) + db.volume_destroy(context.get_admin_context(), vol['id']) def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - info = self.helper.parse_volume_info(None, None) - label = None + vol = self._create_volume() + info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs description = None - self.assertFailure(self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description), - StorageError) + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.create_iscsi_storage_blocking, + self.session, + info, + label, + description) def test_find_sr_from_vbd_raise_no_exception(self): - pass + sr_ref = yield self.helper.find_sr_from_vbd(self.session, + self.session.VBD.FAKE_REF) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - def test_destroy_iscsi_storage_raise_no_exception(self): + def test_destroy_iscsi_storage(self): pass def test_introduce_vdi_raise_no_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.introduce_vdi_blocking(self.session, sr_ref) def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.introduce_vdi_blocking, + self.session, sr_ref) 
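# Illustrative aside, not part of the patch above: these tests lean on the
# fake session's one-shot fail_next_call switch so a single fake can drive
# both the success and the failure paths. Reduced to its essentials (class
# and method names here are made up for illustration):
class ToggleFailureFake(object):
    def __init__(self):
        self.fail_next_call = False

    def call(self, name):
        if self.fail_next_call:
            self.fail_next_call = False    # fail exactly once, then reset
            raise RuntimeError('forced failure in %s' % name)
        return 'FAKE_REFERENCE_%s' % name

fake = ToggleFailureFake()
assert fake.call('SR') == 'FAKE_REFERENCE_SR'   # success path
fake.fail_next_call = True                      # arm the negative-path test
try:
    fake.call('SR')
except RuntimeError:
    pass                                        # expected forced failure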
def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 3d598c463..d9abe54c5 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -13,3 +13,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +""" +:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI +================================================================== +""" diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 75ff587e1..3a01f9c3d 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,4 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +from twisted.web.domhelpers import _get +from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc. # @@ -33,15 +35,15 @@ class Failure(Exception): class FakeXenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self): - pass + self.fail_next_call = False def get_xenapi(self): """ Return the xenapi object """ - raise NotImplementedError() + return self def get_xenapi_host(self): """ Return the xenapi host """ - raise NotImplementedError() + return 'FAKE_XENAPI_HOST' def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns @@ -57,3 +59,70 @@ class FakeXenAPISession(object): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" raise NotImplementedError() + + def __getattr__(self, name): + return FakeXenAPIObject(name, self) + + +class FakeXenAPIObject(object): + def __init__(self, name, session): + self.name = name + self.session = session + self.FAKE_REF = 'FAKE_REFERENCE_%s' % name + + def get_by_name_label(self, label): + if label is None: + return '' # 'No object found' + else: + return 'FAKE_OBJECT_%s_%s' % (self.name, label) + + def getter(self, *args): + self._check_fail() + return self.FAKE_REF + + def ref_list(self, *args): + self._check_fail() + return [FakeXenAPIRecord()] + + def __getattr__(self, name): + if name == 'create': + return self._create + elif name == 'get_record': + return self._record + elif name == 'introduce': + return self._introduce + elif name.startswith('get_'): + getter = 'get_%s' % self.name + if name == getter: + return self.getter + else: + child = name[name.find('_') + 1:] + if child.endswith('s'): + return FakeXenAPIObject(child[:-1], self.session).ref_list + else: + return FakeXenAPIObject(child, self.session).getter + + def _create(self, *args): + self._check_fail() + return self.FAKE_REF + + def _record(self, *args): + self._check_fail() + return FakeXenAPIRecord() + + def _introduce(self, *args): + self._check_fail() + pass + + def _check_fail(self): + if self.session.fail_next_call: + self.session.fail_next_call = False # Reset! 
+ raise Failure('Unable to create %s' % self.name) + + +class FakeXenAPIRecord(dict): + def __init__(self): + pass + + def __getitem__(self, attr): + return '' diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a5d923a3b..9a8db0ad4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,6 +24,7 @@ from twisted.internet import defer from nova import db from nova import context +from nova import exception from nova.auth.manager import AuthManager from nova.virt.xenapi.network_utils import NetworkHelper @@ -123,7 +124,7 @@ class VMOps(object): """ Return data about VM instance """ vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: - raise Exception('instance not present %s' % instance_id) + raise exception.NotFound('Instance not found %s' % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 4ace6da14..d8d21e24d 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -91,7 +91,7 @@ flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') flags.DEFINE_string('iqn_prefix', - 'iqn.2010-10.org.openstack', + 'iqn.2010-12.org.openstack', 'IQN Prefix') -- cgit From fe667352c3e25c744a989ca45f4f9ed472778ae3 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:43:24 +0000 Subject: removing imports that should have not been there --- nova/tests/virt_unittest.py | 3 ++- nova/virt/xenapi/fake.py | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 611022632..1095662c3 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -317,7 +317,8 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.assertEqual(sr_ref, self.session.SR.FAKE_REF) def test_destroy_iscsi_storage(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) def test_introduce_vdi_raise_no_exception(self): sr_ref = self.session.SR.FAKE_REF diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 3a01f9c3d..2fed28609 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,6 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from twisted.web.domhelpers import _get -from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc. 
# @@ -89,8 +87,10 @@ class FakeXenAPIObject(object): return self._create elif name == 'get_record': return self._record - elif name == 'introduce': - return self._introduce + elif name == 'introduce' or\ + name == 'forget' or\ + name == 'unplug': + return self._fake_action elif name.startswith('get_'): getter = 'get_%s' % self.name if name == getter: @@ -110,7 +110,7 @@ class FakeXenAPIObject(object): self._check_fail() return FakeXenAPIRecord() - def _introduce(self, *args): + def _fake_action(self, *args): self._check_fail() pass -- cgit From e30801445f8b543d78494ca63be60f85b94d3a53 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 20:31:33 +0000 Subject: moving xenapi unittests changes into another branch --- nova/tests/virt_unittest.py | 76 -------------------------- nova/virt/xenapi/fake.py | 128 -------------------------------------------- 2 files changed, 204 deletions(-) delete mode 100644 nova/virt/xenapi/fake.py diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 1095662c3..d49383fb7 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,14 +25,9 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') -# Those are XenAPI related -flags.DECLARE('target_host', 'nova.virt.xenapi_conn') -FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -262,74 +257,3 @@ class NWFilterTestCase(test.TrialTestCase): d.addCallback(lambda _: self.teardown_security_group()) return d - - -class XenAPIVolumeTestCase(test.TrialTestCase): - - def setUp(self): - super(XenAPIVolumeTestCase, self).setUp() - self.flags(xenapi_use_fake_session=True) - self.session = fake.FakeXenAPISession() - self.helper = volume_utils.VolumeHelper - self.helper.late_import() - - def _create_volume(self, size='0'): - """Create a volume object.""" - vol = {} - vol['size'] = size - vol['user_id'] = 'fake' - vol['project_id'] = 'fake' - vol['host'] = 'localhost' - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - return db.volume_create(context.get_admin_context(), vol) - - def test_create_iscsi_storage_raise_no_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = 'Test-SR' - self.session.fail_next_call = False - sr_ref = self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - db.volume_destroy(context.get_admin_context(), vol['id']) - - def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = None - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.create_iscsi_storage_blocking, - self.session, - info, - label, - description) - - def test_find_sr_from_vbd_raise_no_exception(self): - sr_ref = yield self.helper.find_sr_from_vbd(self.session, - self.session.VBD.FAKE_REF) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - - def test_destroy_iscsi_storage(self): - sr_ref = self.session.SR.FAKE_REF - 
self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_no_exception(self): - sr_ref = self.session.SR.FAKE_REF - self.helper.introduce_vdi_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - sr_ref = self.session.SR.FAKE_REF - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.introduce_vdi_blocking, - self.session, sr_ref) - - def tearDown(self): - super(XenAPIVolumeTestCase, self).tearDown() diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py deleted file mode 100644 index 2fed28609..000000000 --- a/nova/virt/xenapi/fake.py +++ /dev/null @@ -1,128 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A fake XenAPI SDK. - -Allows for xenapi helper classes testing. -""" - - -class Failure(Exception): - def __init__(self, message=None): - super(Failure, self).__init__(message) - self.details = [] - - def __str__(self): - return 'Fake XenAPI Exception' - - -class FakeXenAPISession(object): - """ The session to invoke XenAPI SDK calls """ - def __init__(self): - self.fail_next_call = False - - def get_xenapi(self): - """ Return the xenapi object """ - return self - - def get_xenapi_host(self): - """ Return the xenapi host """ - return 'FAKE_XENAPI_HOST' - - def call_xenapi(self, method, *args): - """Call the specified XenAPI method on a background thread. Returns - a Deferred for the result.""" - raise NotImplementedError() - - def async_call_plugin(self, plugin, fn, args): - """Call Async.host.call_plugin on a background thread. Returns a - Deferred with the task reference.""" - raise NotImplementedError() - - def wait_for_task(self, task): - """Return a Deferred that will give the result of the given task. 
- The task is polled until it completes.""" - raise NotImplementedError() - - def __getattr__(self, name): - return FakeXenAPIObject(name, self) - - -class FakeXenAPIObject(object): - def __init__(self, name, session): - self.name = name - self.session = session - self.FAKE_REF = 'FAKE_REFERENCE_%s' % name - - def get_by_name_label(self, label): - if label is None: - return '' # 'No object found' - else: - return 'FAKE_OBJECT_%s_%s' % (self.name, label) - - def getter(self, *args): - self._check_fail() - return self.FAKE_REF - - def ref_list(self, *args): - self._check_fail() - return [FakeXenAPIRecord()] - - def __getattr__(self, name): - if name == 'create': - return self._create - elif name == 'get_record': - return self._record - elif name == 'introduce' or\ - name == 'forget' or\ - name == 'unplug': - return self._fake_action - elif name.startswith('get_'): - getter = 'get_%s' % self.name - if name == getter: - return self.getter - else: - child = name[name.find('_') + 1:] - if child.endswith('s'): - return FakeXenAPIObject(child[:-1], self.session).ref_list - else: - return FakeXenAPIObject(child, self.session).getter - - def _create(self, *args): - self._check_fail() - return self.FAKE_REF - - def _record(self, *args): - self._check_fail() - return FakeXenAPIRecord() - - def _fake_action(self, *args): - self._check_fail() - pass - - def _check_fail(self): - if self.session.fail_next_call: - self.session.fail_next_call = False # Reset! - raise Failure('Unable to create %s' % self.name) - - -class FakeXenAPIRecord(dict): - def __init__(self): - pass - - def __getitem__(self, attr): - return '' -- cgit From 6e37cf42d758b5040442d9c296b21955d10a7327 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 14 Dec 2010 11:48:29 +0000 Subject: final cleanup, after moving unittest work into another branch --- nova/virt/xenapi/__init__.py | 30 ++++++++++++++++++ nova/virt/xenapi/fake.py | 66 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/network_utils.py | 3 +- nova/virt/xenapi/vm_utils.py | 19 ++--------- nova/virt/xenapi/vmops.py | 6 ++-- nova/virt/xenapi/volume_utils.py | 20 ++---------- nova/virt/xenapi/volumeops.py | 8 +++-- nova/virt/xenapi_conn.py | 10 ++++-- 8 files changed, 118 insertions(+), 44 deletions(-) create mode 100644 nova/virt/xenapi/fake.py diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index d9abe54c5..1a2903b98 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -18,3 +18,33 @@ :mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI ================================================================== """ + + +def load_sdk(flags): + """ + This method is used for loading the XenAPI SDK (fake or real) + """ + xenapi_module = \ + flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' + from_list = \ + flags.xenapi_use_fake_session and ['fake'] or [] + + return __import__(xenapi_module, globals(), locals(), from_list, -1) + + +class HelperBase(): + """ + The class that wraps the helper methods together. + """ + XenAPI = None + + def __init__(self): + return + + @classmethod + def late_import(cls, FLAGS): + """ + Load XenAPI module in for helper class + """ + if cls.XenAPI is None: + cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py new file mode 100644 index 000000000..8d6a39a87 --- /dev/null +++ b/nova/virt/xenapi/fake.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A fake XenAPI SDK. + +Allows for xenapi helper classes testing. +""" + + +class Failure(Exception): + def __init__(self, message=None): + super(Failure, self).__init__(message) + self.details = [] + + def __str__(self): + return 'Fake XenAPI Exception' + + +class FakeSession(object): + """ + The session to invoke XenAPI SDK calls. + FIXME(armando): this is a placeholder + for the xenapi unittests branch. + """ + def __init__(self, url): + pass + + def get_xenapi(self): + """ Return the xenapi object """ + raise NotImplementedError() + + def get_xenapi_host(self): + """ Return the xenapi host """ + raise NotImplementedError() + + def call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + raise NotImplementedError() + + def async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. Returns a + Deferred with the task reference.""" + raise NotImplementedError() + + def wait_for_task(self, task): + """Return a Deferred that will give the result of the given task. + The task is polled until it completes.""" + raise NotImplementedError() + + def __getattr__(self, name): + raise NotImplementedError() diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..cb745cdaf 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -21,9 +21,10 @@ their lookup functions. """ from twisted.internet import defer +from nova.virt.xenapi import HelperBase -class NetworkHelper(): +class NetworkHelper(HelperBase): """ The class that wraps the helper methods together. """ diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 89831fe5d..2b0691c01 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -32,6 +32,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from nova.virt.xenapi import HelperBase from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS @@ -44,29 +45,13 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} -class VMHelper(): +class VMHelper(HelperBase): """ The class that wraps the helper methods together. 
""" - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 9a8db0ad4..c79245972 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,9 +24,11 @@ from twisted.internet import defer from nova import db from nova import context +from nova import flags from nova import exception from nova.auth.manager import AuthManager +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -36,10 +38,10 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper class - VMHelper.late_import() + VMHelper.late_import(flags.FLAGS) def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 05143ed91..4482e465c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -30,7 +30,7 @@ from nova import context from nova import flags from nova import process from nova import utils - +from nova.virt.xenapi import HelperBase FLAGS = flags.FLAGS @@ -41,29 +41,13 @@ class StorageError(Exception): super(StorageError, self).__init__(message) -class VolumeHelper(): +class VolumeHelper(HelperBase): """ The class that wraps the helper methods together. 
""" - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @utils.deferredToThread def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index b1afdc811..1b337a6ed 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -21,6 +21,8 @@ import logging from twisted.internet import defer +from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -31,11 +33,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import() - VMHelper.late_import() + VolumeHelper.late_import(flags.FLAGS) + VMHelper.late_import(flags.FLAGS) @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index d8d21e24d..649d5dd04 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -59,6 +59,7 @@ from twisted.internet import reactor from nova import utils from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps @@ -91,7 +92,7 @@ flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') flags.DEFINE_string('iqn_prefix', - 'iqn.2010-12.org.openstack', + 'iqn.2010-10.org.openstack', 'IQN Prefix') @@ -156,8 +157,11 @@ class XenAPISession(object): def __init__(self, url, user, pw): # This is loaded late so that there's no need to install this # library when not using XenAPI. - self.XenAPI = __import__('XenAPI') - self._session = self.XenAPI.Session(url) + self.XenAPI = load_sdk(FLAGS) + if FLAGS.xenapi_use_fake_session: + self._session = self.XenAPI.FakeSession(url) + else: + self._session = self.XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): -- cgit From dada56794679b213b2d80e4e1f907a212b73f54e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 15 Dec 2010 17:50:05 +0000 Subject: * code cleanup * revised unittest approach * added stubout and a number of tests --- nova/tests/xenapi_unittest.py | 136 ++++++++++++++++++++++++++++++++++++++---- nova/virt/xenapi/__init__.py | 22 +------ nova/virt/xenapi/fake.py | 40 +------------ nova/virt/xenapi/vmops.py | 6 +- nova/virt/xenapi/volumeops.py | 7 +-- nova/virt/xenapi_conn.py | 24 ++++---- 6 files changed, 146 insertions(+), 89 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c9be17327..b9955a946 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -31,7 +31,7 @@ # under the License. 
-import mox +import stubout import uuid from twisted.internet import defer @@ -51,20 +51,34 @@ from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volumeops +from boto.ec2.volume import Volume FLAGS = flags.FLAGS +def stubout_session(stubs, cls): + def fake_import(self): + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + class XenAPIVolumeTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fake.reset() def _create_volume(self, size='0'): """Create a volume object.""" @@ -78,11 +92,12 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vol['attach_status'] = "detached" return db.volume_create(context.get_admin_context(), vol) - def test_create_iscsi_storage_raise_no_exception(self): - fake.reset() + def test_create_iscsi_storage(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper - helper.late_import(FLAGS) + helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] @@ -93,8 +108,25 @@ class XenAPIVolumeTestCase(test.TrialTestCase): description) db.volume_destroy(context.get_admin_context(), vol['id']) + def test_parse_volume_info_raise_exception(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') + helper = volume_utils.VolumeHelper + helper.XenAPI = session.get_imported_xenapi() + vol = self._create_volume() + # oops, wrong mount point! 
+ info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') + + def check(exc): + self.assertIsInstance(exc.value, volume_utils.StorageError) + + info.addErrback(check) + db.volume_destroy(context.get_admin_context(), vol['id']) + def test_attach_volume(self): - fake.reset() + """ This shows how to test Ops classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, @@ -116,13 +148,34 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result.addCallback(check) return result + def test_attach_volume_raise_exception(self): + """ This shows how to test when exceptions are raised """ + stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + conn = xenapi_conn.get_connection(False) + volume = self._create_volume() + instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, + 'm1.large', 'aa:bb:cc:dd:ee:ff') + fake.create_vm(instance.name, 'Running') + result = conn.attach_volume(instance.name, volume['ec2_id'], + '/dev/sdc') + + def check(exc): + if exc: + pass + else: + self.fail('Oops, no exception has been raised!') + + result.addErrback(check) + return result + def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() + self.stubs.UnsetAll() class XenAPIVMTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVMTestCase, self).setUp() @@ -131,19 +184,20 @@ class XenAPIVMTestCase(test.TrialTestCase): admin=True) self.project = self.manager.create_project('fake', 'fake', 'fake') self.network = utils.import_object(FLAGS.network_manager) - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) - #test_list_instances_0.skip = "E" def test_spawn(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, 'm1.large', 'aa:bb:cc:dd:ee:ff') @@ -186,6 +240,7 @@ class XenAPIVMTestCase(test.TrialTestCase): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + self.stubs.UnsetAll() class FakeInstance(): @@ -199,3 +254,64 @@ class FakeInstance(): self.ramdisk_id = ramdisk_id self.instance_type = instance_type self.mac_address = mac_address + + +class FakeSessionForVMTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug + pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + 
def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # test failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # test failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 1a2903b98..c7038deae 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,31 +20,11 @@ """ -def load_sdk(flags): - """ - This method is used for loading the XenAPI SDK (fake or real) - """ - xenapi_module = \ - flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - flags.xenapi_use_fake_session and ['fake'] or [] - - return __import__(xenapi_module, globals(), locals(), from_list, -1) - - class HelperBase(): """ - The class that wraps the helper methods together. + The base for helper classes. This adds the XenAPI class attribute """ XenAPI = None def __init__(self): return - - @classmethod - def late_import(cls, FLAGS): - """ - Load XenAPI module in for helper class - """ - if cls.XenAPI is None: - cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index f5fea3cc2..038064e8e 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -154,7 +154,7 @@ class Failure(Exception): def __str__(self): try: return str(self.details) - except Exception, exn: + except Exception, exc: return "XenAPI Fake Failure: %s" % str(self.details) def _details_map(self): @@ -324,8 +324,8 @@ class SessionBase(object): try: task['result'] = self.xenapi_request(func, params[1:]) task['status'] = 'success' - except Failure, exn: - task['error_info'] = exn.details + except Failure, exc: + task['error_info'] = exc.details task['status'] = 'failed' task['finished'] = datetime.datetime.now() return task_ref @@ -372,37 +372,3 @@ class _Dispatcher: def __call__(self, *args): return self.__send(self.__name, args) - - -class FakeSession(SessionBase): - def __init__(self, uri): - super(FakeSession, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = get_all('VDI') - for ref in refs: - rec = get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise Failure([['INVALID_VDI', 'session', self._session]]) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 
36b8fecc2..abaa1f5f1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -28,7 +28,6 @@ from nova import flags from nova import exception from nova.auth.manager import AuthManager -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -38,10 +37,9 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = session.get_imported_xenapi() self._session = session - # Load XenAPI module in the helper class - VMHelper.late_import(flags.FLAGS) + VMHelper.XenAPI = self.XenAPI def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 1b337a6ed..68806c4c2 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -22,7 +22,6 @@ import logging from twisted.internet import defer from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -33,11 +32,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = session.get_imported_xenapi() self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import(flags.FLAGS) - VMHelper.late_import(flags.FLAGS) + VolumeHelper.XenAPI = self.XenAPI + VMHelper.XenAPI = self.XenAPI @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 5f2b9c7c6..a7e3a6723 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -36,7 +36,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. **Related Flags** -:xenapi_use_fake_session: To be set for unit testing :xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. :xenapi_connection_username: Username for connection to XenServer/Xen Cloud Platform (default: root). @@ -59,15 +58,11 @@ from twisted.internet import reactor from nova import utils from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS -flags.DEFINE_boolean('xenapi_use_fake_session', - False, - 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' @@ -159,15 +154,14 @@ class XenAPIConnection(object): class XenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self, url, user, pw): - # This is loaded late so that there's no need to install this - # library when not using XenAPI. - self.XenAPI = load_sdk(FLAGS) - if FLAGS.xenapi_use_fake_session: - self._session = self.XenAPI.FakeSession(url) - else: - self._session = self.XenAPI.Session(url) + self.XenAPI = self.get_imported_xenapi() + self._session = self._create_session(url) self._session.login_with_password(user, pw) + def get_imported_xenapi(self): + """Stubout point. 
This can be replaced with a mock xenapi module.""" + return __import__('XenAPI') + def get_xenapi(self): """ Return the xenapi object """ return self._session.xenapi @@ -200,6 +194,10 @@ class XenAPISession(object): reactor.callLater(0, self._poll_task, task, d) return d + def _create_session(self, url): + """Stubout point. This can be replaced with a mock session.""" + return self.XenAPI.Session(url) + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we @@ -220,7 +218,7 @@ class XenAPISession(object): error_info) deferred.errback(self.XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) - except self.XenAPI.Failure, exc: + except Exception, exc: logging.warn(exc) deferred.errback(exc) -- cgit From e893be0a8d32cf1eb2c91187b81a6febf90e5b7c Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 15 Dec 2010 18:28:00 +0000 Subject: Adding back in openssh-lpk schema, as keys will likely be stored in LDAP again. --- nova/auth/opendj.sh | 1 + nova/auth/openssh-lpk_openldap.schema | 19 +++++++++++++++++++ nova/auth/openssh-lpk_sun.schema | 10 ++++++++++ nova/auth/slap.sh | 1 + 4 files changed, 31 insertions(+) create mode 100644 nova/auth/openssh-lpk_openldap.schema create mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 9a9600342..1a280e5a8 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,6 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' +cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema new file mode 100644 index 000000000..93351da6d --- /dev/null +++ b/nova/auth/openssh-lpk_openldap.schema @@ -0,0 +1,19 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Based on the proposal of : Mark Ruijter +# + + +# octetString SYNTAX +attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' + DESC 'MANDATORY: OpenSSH Public key' + EQUALITY octetStringMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) + +# printableString SYNTAX yes|no +objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY + DESC 'MANDATORY: OpenSSH LPK objectclass' + MAY ( sshPublicKey $ uid ) + ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema new file mode 100644 index 000000000..5f52db3b6 --- /dev/null +++ b/nova/auth/openssh-lpk_sun.schema @@ -0,0 +1,10 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Schema for Sun Directory Server. +# Based on the original schema, modified by Stefan Fischer. 
+# +dn: cn=schema +attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) +objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh index 36c4ba37b..95c61dafd 100755 --- a/nova/auth/slap.sh +++ b/nova/auth/slap.sh @@ -21,6 +21,7 @@ apt-get install -y slapd ldap-utils python-ldap abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig -- cgit From a4db44b94e611798d57ad59f4d4dbb5fb00516db Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 16:33:38 +0000 Subject: Removed FakeInstance and introduced stubout for DB. Code clean-up --- nova/tests/db/__init__.py | 33 +++++++++ nova/tests/db/fakes.py | 61 ++++++++++++++++ nova/tests/xenapi/__init__.py | 33 +++++++++ nova/tests/xenapi/stubs.py | 98 +++++++++++++++++++++++++ nova/tests/xenapi_unittest.py | 165 +++++++++++------------------------------- 5 files changed, 267 insertions(+), 123 deletions(-) create mode 100644 nova/tests/db/__init__.py create mode 100644 nova/tests/db/fakes.py create mode 100644 nova/tests/xenapi/__init__.py create mode 100644 nova/tests/xenapi/stubs.py diff --git a/nova/tests/db/__init__.py b/nova/tests/db/__init__.py new file mode 100644 index 000000000..a157d7592 --- /dev/null +++ b/nova/tests/db/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py new file mode 100644 index 000000000..b3fb56c69 --- /dev/null +++ b/nova/tests/db/fakes.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" + +import time + +from nova import db +from nova import utils +from nova.compute import instance_types + + +def stub_out_db_instance_api(stubs): + """ Stubs out the db API for creating Instances """ + + class FakeInstance(object): + """ Stubs out the Instance model """ + def __init__(self, values): + self.values = values + + def __getattr__(self, name): + return self.values[name] + + def fake_create(values): + """ Stubs out the db.instance_create method """ + + type_data = instance_types.INSTANCE_TYPES[values['instance_type']] + + base_options = { + 'name': values['name'], + 'reservation_id': utils.generate_uid('r'), + 'image_id': values['image_id'], + 'kernel_id': values['kernel_id'], + 'ramdisk_id': values['ramdisk_id'], + 'state_description': 'scheduling', + 'user_id': values['user_id'], + 'project_id': values['project_id'], + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': values['instance_type'], + 'memory_mb': type_data['memory_mb'], + 'mac_address': values['mac_address'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + } + return FakeInstance(base_options) + + stubs.Set(db, 'instance_create', fake_create) diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py new file mode 100644 index 000000000..a157d7592 --- /dev/null +++ b/nova/tests/xenapi/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py new file mode 100644 index 000000000..525189388 --- /dev/null +++ b/nova/tests/xenapi/stubs.py @@ -0,0 +1,98 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" + +from nova.virt import xenapi_conn +from nova.virt.xenapi import fake + + +def stubout_session(stubs, cls): + """ Stubs out two methods from XenAPISession """ + def fake_import(self): + """ Stubs out get_imported_xenapi of XenAPISession """ + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + +class FakeSessionForVMTests(fake.SessionBase): + """ Stubs out a XenAPISession for VM tests """ + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + """ Stubs out a XenAPISession for Volume tests """ + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug + pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + """ Stubs out a XenAPISession for Volume tests: it injects failures """ + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # This is for testing failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # This is for testing failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index b9955a946..c2612a4c5 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -30,16 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. +""" +Test suite for XenAPI +""" import stubout -import uuid - -from twisted.internet import defer -from twisted.internet import threads from nova import db from nova import context -from nova import exception from nova import flags from nova import test from nova import utils @@ -49,28 +31,15 @@ from nova.compute import power_state from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import volumeops -from boto.ec2.volume import Volume +from nova.tests.db import fakes +from nova.tests.xenapi import stubs FLAGS = flags.FLAGS -def stubout_session(stubs, cls): - def fake_import(self): - fake_module = 'nova.virt.xenapi.fake' - from_list = ['fake'] - return __import__(fake_module, globals(), locals(), from_list, -1) - - stubs.Set(xenapi_conn.XenAPISession, '_create_session', - lambda s, url: cls(url)) - stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', - fake_import) - - class XenAPIVolumeTestCase(test.TrialTestCase): """ - Unit tests for VM operations + Unit tests for Volume operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() @@ -78,7 +47,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fakes.stub_out_db_instance_api(self.stubs) fake.reset() + self.values = {'name': 1, + 'project_id': 'fake', + 'user_id': 'fake', + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } def _create_volume(self, size='0'): """Create a volume object.""" @@ -94,7 +73,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_create_iscsi_storage(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -106,11 +85,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info, label, description) + srs = fake.get_all('SR') + self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) def test_parse_volume_info_raise_exception(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -119,6 +100,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info = helper.parse_volume_info(vol['ec2_id'], 
'/dev/sd') def check(exc): + """ handler """ self.assertIsInstance(exc.value, volume_utils.StorageError) info.addErrback(check) @@ -126,16 +108,16 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume(self): """ This shows how to test Ops classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(_): + """ handler """ # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -150,16 +132,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ - stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + stubs.stubout_session(self.stubs, + stubs.FakeSessionForVolumeFailedTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(exc): + """ handler """ if exc: pass else: @@ -188,22 +171,32 @@ class XenAPIVMTestCase(test.TrialTestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() + fakes.stub_out_db_instance_api(self.stubs) fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) def test_spawn(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + values = {'name': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } conn = xenapi_conn.get_connection(False) - instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(values) result = conn.spawn(instance) def check(_): + """ handler """ instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -241,77 +234,3 @@ class XenAPIVMTestCase(test.TrialTestCase): self.manager.delete_project(self.project) self.manager.delete_user(self.user) self.stubs.UnsetAll() - - -class FakeInstance(): - def __init__(self, name, project_id, user_id, image_id, kernel_id, - ramdisk_id, instance_type, mac_address): - self.name = name - self.project_id = project_id - self.user_id = user_id - self.image_id = image_id - self.kernel_id = kernel_id - self.ramdisk_id = ramdisk_id - self.instance_type = instance_type - self.mac_address = mac_address - - -class FakeSessionForVMTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVMTests, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, 
_5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = fake.get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - -class FakeSessionForVolumeTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVolumeTests, self).__init__(uri) - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = fake.get_all('VDI') - for ref in refs: - rec = fake.get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - -class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): - def __init__(self, uri): - super(FakeSessionForVolumeFailedTests, self).__init__(uri) - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - # test failure - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - def VBD_plug(self, _1, _2): - # test failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) -- cgit From 8152acf7c3df83a04591fdafb21201965da7bfad Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 17:47:48 +0000 Subject: fake session clean-up --- nova/tests/db/__init__.py | 16 ---------------- nova/tests/xenapi/__init__.py | 16 ---------------- nova/virt/xenapi/fake.py | 34 ++++++++++++++++++++++------------ 3 files changed, 22 insertions(+), 44 deletions(-) diff --git a/nova/tests/db/__init__.py b/nova/tests/db/__init__.py index a157d7592..dcd81b743 100644 --- a/nova/tests/db/__init__.py +++ b/nova/tests/db/__init__.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py index a157d7592..dcd81b743 100644 --- a/nova/tests/xenapi/__init__.py +++ b/nova/tests/xenapi/__init__.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 038064e8e..a46f3fd80 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -105,9 +105,10 @@ def create_vdi(name_label, read_only, sr_ref, sharable): }) -def create_pbd(config, attached): +def create_pbd(config, sr_ref, attached): return _create_object('PBD', { 'device-config': config, + 'SR': sr_ref, 'currently-attached': attached, }) @@ -126,6 +127,21 @@ def _create_object(table, obj): return ref +def _create_sr(table, obj): + sr_type = obj[6] + # Forces fake to support iscsi only + if sr_type != 'iscsi': + raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) + sr_ref = _create_object(table, obj[2]) + vdi_ref = create_vdi('', False, sr_ref, False) + pbd_ref = create_pbd('', sr_ref, True) + _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + _db_content['VDI'][vdi_ref]['SR'] = sr_ref + _db_content['PBD'][pbd_ref]['SR'] = sr_ref + return sr_ref + + def get_all(table): return _db_content[table].keys() @@ -296,19 +312,13 @@ class SessionBase(object): def _create(self, name, params): self._check_session(params) - expected = 2 - if name == 'SR.create': - expected = 10 + is_sr_create = name == 'SR.create' + # Storage Repositories have a different API + expected = is_sr_create and 10 or 2 self._check_arg_count(params, expected) (cls, _) = name.split('.') - if name == 'SR.create': - vdi_ref = create_vdi('', False, '', False) - pbd_ref = create_pbd('', True) - params[2]['VDIs'] = [vdi_ref] - params[2]['PBDs'] = [pbd_ref] - ref = _create_object(cls, params[2]) - else: - ref = _create_object(cls, params[1]) + ref = is_sr_create and \ + _create_sr(cls, params) or _create_object(cls, params[1]) obj = get_record(cls, ref) # Add RO fields -- cgit From db96fd559d28bcfdf8cc29d79b9afca6dea1cfb7 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 18:44:42 +0000 Subject: reviewed the FIXMEs, and spotted an uncaught exception in volume_utils...yay! 
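The volume_utils hunk below wraps the VDI.introduce call in a try/except so a raw XenAPI failure no longer escapes unhandled; it is logged and re-raised as StorageError. A rough standalone sketch of that translation pattern (the classes and the session method here are simplified stand-ins for XenAPI.Failure and the Nova session, not the real objects, and the real call passes the full VDI record):

    import logging


    class Failure(Exception):
        """Stand-in for XenAPI.Failure."""
        pass


    class StorageError(Exception):
        """Storage-specific error raised by volume_utils."""
        pass


    def introduce_vdi(session, vdi_rec):
        """Introduce a VDI, translating low-level failures into StorageError."""
        try:
            return session.VDI_introduce(vdi_rec)
        except Failure as exc:
            # Log the XenAPI failure, then re-raise it in volume_utils' own
            # vocabulary so callers only need to handle StorageError.
            logging.warn(exc)
            raise StorageError('Unable to introduce VDI for SR %s'
                               % vdi_rec.get('SR'))
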
--- nova/tests/virt_unittest.py | 2 -- nova/tests/xenapi/stubs.py | 24 +++++++++--------------- nova/tests/xenapi_unittest.py | 5 ++--- nova/virt/xenapi/fake.py | 1 + nova/virt/xenapi/volume_utils.py | 29 +++++++++++++++++------------ 5 files changed, 29 insertions(+), 32 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 52843b703..d49383fb7 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,8 +25,6 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 525189388..11dd535d4 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -58,21 +58,12 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass + def VBD_plug(self, _1, ref): + rec = fake.get_record('VBD', ref) + rec['currently-attached'] = True def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce valid_vdi = False refs = fake.get_all('VDI') for ref in refs: @@ -93,6 +84,9 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): # This is for testing failure raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - def VBD_plug(self, _1, _2): - # This is for testing failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) + def PBD_unplug(self, _1, ref): + rec = fake.get_record('PBD', ref) + rec['currently-attached'] = False + + def SR_forget(self, _1, ref): + pass diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c2612a4c5..839d6aa44 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -141,14 +141,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(exc): + def check_exception(exc): """ handler """ if exc: pass else: self.fail('Oops, no exception has been raised!') - - result.addErrback(check) + result.addErrback(check_exception) return result def tearDown(self): diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index a46f3fd80..7877b5905 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -102,6 +102,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable): 'location': '', 'xenstore_data': '', 'sm_config': {}, + 'VBDs': {}, }) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 4482e465c..8d1d6fb81 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -152,18 +152,23 @@ class VolumeHelper(HelperBase): logging.warn(exc) raise StorageError('Unable to get record of VDI %s on' % vdis[0]) else: - return session.get_xenapi().VDI.introduce( - vdi_rec['uuid'], - vdi_rec['name_label'], - vdi_rec['name_description'], - vdi_rec['SR'], - vdi_rec['type'], - vdi_rec['sharable'], - vdi_rec['read_only'], - vdi_rec['other_config'], - vdi_rec['location'], - vdi_rec['xenstore_data'], - vdi_rec['sm_config']) + try: + vdi_ref = 
session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + except cls.XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to introduce VDI for SR %s' + % sr_ref) @classmethod @defer.inlineCallbacks -- cgit From ca81b0c12a3853942e9ce85154c38dad381ead0e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 18 Dec 2010 00:50:49 +0000 Subject: fixed unittests and further clean-up post-eventlet merge --- nova/tests/xenapi_unittest.py | 48 +++++++++++++-------------------------- nova/virt/xenapi/__init__.py | 2 +- nova/virt/xenapi/network_utils.py | 2 +- nova/virt/xenapi/vm_utils.py | 3 +-- nova/virt/xenapi/vmops.py | 8 ++++--- nova/virt/xenapi/volume_utils.py | 2 +- nova/virt/xenapi/volumeops.py | 1 - nova/virt/xenapi_conn.py | 8 +++---- 8 files changed, 29 insertions(+), 45 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index 839d6aa44..74401ce5d 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -78,13 +78,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() - info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] description = 'Test-SR' - sr_ref = helper.create_iscsi_storage_blocking(session, - info, - label, - description) + sr_ref = helper.create_iscsi_storage(session, info, label, description) srs = fake.get_all('SR') self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) @@ -97,13 +94,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() # oops, wrong mount point! 
- info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') - - def check(exc): - """ handler """ - self.assertIsInstance(exc.value, volume_utils.StorageError) - - info.addErrback(check) + self.assertRaises(volume_utils.StorageError, + helper.parse_volume_info, + vol['ec2_id'], + '/dev/sd') db.volume_destroy(context.get_admin_context(), vol['id']) def test_attach_volume(self): @@ -116,8 +110,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(_): - """ handler """ + def check(): # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -127,8 +120,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vm_ref = vbd['VM'] self.assertEqual(vm_ref, vms[0]) - result.addCallback(check) - return result + check() def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ @@ -138,17 +130,11 @@ class XenAPIVolumeTestCase(test.TrialTestCase): volume = self._create_volume() instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') - result = conn.attach_volume(instance.name, volume['ec2_id'], - '/dev/sdc') - - def check_exception(exc): - """ handler """ - if exc: - pass - else: - self.fail('Oops, no exception has been raised!') - result.addErrback(check_exception) - return result + self.assertRaises(Exception, + conn.attach_volume, + instance.name, + volume['ec2_id'], + '/dev/sdc') def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() @@ -192,10 +178,9 @@ class XenAPIVMTestCase(test.TrialTestCase): } conn = xenapi_conn.get_connection(False) instance = db.instance_create(values) - result = conn.spawn(instance) + conn.spawn(instance) - def check(_): - """ handler """ + def check(): instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -225,8 +210,7 @@ class XenAPIVMTestCase(test.TrialTestCase): # Check that the VM is running according to XenAPI. self.assertEquals(vm['power_state'], 'Running') - result.addCallback(check) - return result + check() def tearDown(self): super(XenAPIVMTestCase, self).tearDown() diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index c7038deae..c75162f08 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,7 +20,7 @@ """ -class HelperBase(): +class HelperBase(object): """ The base for helper classes. This adds the XenAPI class attribute """ diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index c292383b6..e783120fe 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -29,7 +29,7 @@ class NetworkHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(NetworkHelper, self).__init__() @classmethod def find_network_with_bridge(cls, session, bridge): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 57d419773..911fcc9b2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -24,7 +24,6 @@ import urllib from xml.dom import minidom from nova import flags -from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state @@ -48,7 +47,7 @@ class VMHelper(HelperBase): The class that wraps the helper methods together. 
""" def __init__(self): - return + super(VMHelper, self).__init__() @classmethod def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0be4ed07d..aa3a3ae53 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -22,7 +22,6 @@ import logging from nova import db from nova import context -from nova import flags from nova import exception from nova import utils @@ -78,6 +77,7 @@ class VMOps(object): logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) def _wait_for_boot(): @@ -88,7 +88,8 @@ class VMOps(object): if state == power_state.RUNNING: logging.debug('Instance %s: booted', instance['name']) timer.stop() - except: + except Exception, exc: + logging.warn(exc) logging.exception('instance %s: failed to boot', instance['name']) db.instance_set_state(context.get_admin_context(), @@ -131,6 +132,7 @@ class VMOps(object): self._session.wait_for_task(task) except self.XenAPI.Failure, exc: logging.warn(exc) + # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(task) @@ -149,7 +151,7 @@ class VMOps(object): """Return data about VM diagnostics""" vm = VMHelper.lookup(self._session, instance_id) if vm is None: - raise exception.Exception("Instance not found %s" % instance_id) + raise exception.NotFound("Instance not found %s" % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 255f23d88..5366078ce 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -43,7 +43,7 @@ class VolumeHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(VolumeHelper, self).__init__() @classmethod def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c7516073..9dbb1bb14 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -19,7 +19,6 @@ Management class for Storage-related functions (attach, detach, etc). 
""" import logging -from nova import flags from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 207222744..2a8614cfd 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -119,11 +119,11 @@ class XenAPIConnection(object): def spawn(self, instance): """ Create VM instance """ - return self._vmops.spawn(instance) + self._vmops.spawn(instance) def reboot(self, instance): """ Reboot VM instance """ - return self._vmops.reboot(instance) + self._vmops.reboot(instance) def destroy(self, instance): """ Destroy VM instance """ @@ -143,13 +143,13 @@ class XenAPIConnection(object): def attach_volume(self, instance_name, device_path, mountpoint): """ Attach volume storage to VM instance """ - return self._volumeops.attach_volume(instance_name, + self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): """ Detach volume storage to VM instance """ - return self._volumeops.detach_volume(instance_name, mountpoint) + self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): -- cgit From ca1017988f98a246aa82c16f471791c7ea3eceec Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 19 Dec 2010 20:05:41 +0000 Subject: Support proxying api by using X-Forwarded-For --- nova/api/ec2/__init__.py | 8 +++++++- nova/api/ec2/metadatarequesthandler.py | 11 +++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a6ee16c33..c455144a9 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -37,6 +37,9 @@ from nova.auth import manager FLAGS = flags.FLAGS +flags.DEFINE_boolean('use_forwarded_for', False, + 'Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.') _log = logging.getLogger("api") _log.setLevel(logging.DEBUG) @@ -81,9 +84,12 @@ class Authenticate(wsgi.Middleware): raise webob.exc.HTTPForbidden() # Authenticated! 
From f0195ebfd2cc56cee5797fff19fb9702c51df51b Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Tue, 21 Dec 2010 21:47:13 +0000
Subject: fix reboot command to work even if a host is rebooted

---
 nova/compute/manager.py   | 4 +++-
 nova/virt/libvirt_conn.py | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index a84af6bb9..6d3ea966d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -138,8 +138,8 @@
     def reboot_instance(self, context, instance_id):
         """Reboot an instance on this server."""
         context = context.elevated()
-        instance_ref = self.db.instance_get(context, instance_id)
         self._update_state(context, instance_id)
+        instance_ref = self.db.instance_get(context, instance_id)

         if instance_ref['state'] != power_state.RUNNING:
             logging.warn('trying to reboot a non-running '
@@ -153,6 +153,7 @@
                               instance_id,
                               power_state.NOSTATE,
                               'rebooting')
+        self.network_manager.setup_compute_network(context, instance_id)
         self.driver.reboot(instance_ref)
         self._update_state(context, instance_id)

@@ -168,6 +169,7 @@
                               instance_id,
                               power_state.NOSTATE,
                               'rescuing')
+        self.network_manager.setup_compute_network(context, instance_id)
         self.driver.rescue(instance_ref)
         self._update_state(context, instance_id)

diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index ad101db2a..845167d9f 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -40,6 +40,7 @@ import logging
 import os
 import shutil

+from eventlet import greenthread
 from eventlet import event
 from eventlet import tpool

@@ -183,7 +184,8 @@
         # everything has been vetted a bit
         def _wait_for_timer():
             timer_done.wait()
-            self._cleanup(instance)
+            if cleanup:
+                self._cleanup(instance)
             done.send()

         greenthread.spawn(_wait_for_timer)
-- cgit
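
The libvirt_conn.py hunk above makes the post-shutdown cleanup conditional on a cleanup flag, presumably so the destroy path can be reused (for example by reboot) without removing the instance's files. A small eventlet sketch of that pattern, assuming eventlet is available, with print standing in for the real cleanup and all names illustrative rather than nova's API:

    from eventlet import event
    from eventlet import greenthread

    def destroy(instance_name, cleanup=True):
        timer_done = event.Event()
        done = event.Event()

        def _wait_for_timer():
            timer_done.wait()
            if cleanup:  # only wipe state when explicitly asked to
                print('cleaning up %s' % instance_name)
            done.send()

        greenthread.spawn(_wait_for_timer)
        timer_done.send()  # stand-in for the shutdown timer firing
        return done

    # reboot-style call: shut down but keep the instance's files
    destroy('instance-00000001', cleanup=False).wait()
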
From a7e5a4a39b93b32974ca82b77391368c4f01cdd8 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Wed, 22 Dec 2010 22:54:43 +0000
Subject: removed extra files

---
 nova/virt/libvirt.rescue.qemu.xml.template.THIS | 38 -------------------------
 nova/virt/libvirt.rescue.uml.xml.template.THIS  | 31 --------------------
 2 files changed, 69 deletions(-)
 delete mode 100644 nova/virt/libvirt.rescue.qemu.xml.template.THIS
 delete mode 100644 nova/virt/libvirt.rescue.uml.xml.template.THIS

diff --git a/nova/virt/libvirt.rescue.qemu.xml.template.THIS b/nova/virt/libvirt.rescue.qemu.xml.template.THIS
deleted file mode 100644
index a3b88106c..000000000
--- a/nova/virt/libvirt.rescue.qemu.xml.template.THIS
+++ /dev/null
@@ -1,38 +0,0 @@
-
-    %(name)s
-
-        hvm
-        %(basepath)s/rescue-kernel
-        %(basepath)s/rescue-ramdisk
-        root=/dev/vda1 console=ttyS0
-
-
-
-
-    %(memory_kb)s
-    %(vcpus)s
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        %(extra_params)s
-
-
-
-
-
-
-
diff --git a/nova/virt/libvirt.rescue.uml.xml.template.THIS b/nova/virt/libvirt.rescue.uml.xml.template.THIS
deleted file mode 100644
index a254692d4..000000000
--- a/nova/virt/libvirt.rescue.uml.xml.template.THIS
+++ /dev/null
@@ -1,31 +0,0 @@
-
-    %(name)s
-    %(memory_kb)s
-
-        %(type)s
-        /usr/bin/linux
-        /dev/ubda1
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        %(extra_params)s
-
-
-
-
-
-
-
-- cgit
From 93dcd9088108b17c44337f556d0c624e802ba788 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Thu, 23 Dec 2010 01:32:57 +0000
Subject: fix commits from Anthony and Vish that were committed with the wrong email

---
 .mailmap | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index 8041e2341..c01f964d2 100644
--- a/.mailmap
+++ b/.mailmap
@@ -24,7 +24,7 @@
+
+

 # These are from people who failed to set a proper committer
-. .
-.
-- cgit
From 588b39981d03c61c7b4eafdb489467ab57540d13 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Thu, 23 Dec 2010 02:03:39 +0000
Subject: Add Ryan Lane as well

---
 .mailmap | 3 +--
 Authors  | 1 +
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index c01f964d2..9ab7db743 100644
--- a/.mailmap
+++ b/.mailmap
@@ -26,5 +26,4 @@

-# These are from people who failed to set a proper committer
-.
+
diff --git a/Authors b/Authors
index 0b048becb..250b3b2ad 100644
--- a/Authors
+++ b/Authors
@@ -24,6 +24,7 @@ Michael Gundlach
 Monty Taylor
 Paul Voccio
 Rick Clark
+Ryan Lane
 Ryan Lucio
 Sandy Walsh
 Soren Hansen
-- cgit