From 9ca0b3435d93a87407ca42a853562cd06aaa896e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 22 Nov 2010 12:57:03 +0000 Subject: added placeholders --- nova/virt/xenapi.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 0f563aa41..4ed5d047f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -296,6 +296,14 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + + @defer.inlineCallbacks + def attach_volume(self, instance_name, device_path, mountpoint): + return True + + @defer.inlineCallbacks + def detach_volume(self, instance_name, mountpoint): + return True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) -- cgit From 9f722a0bcdb987c228f4ebf1e42c904a26d0ef73 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 10:42:06 +0000 Subject: first cut of changes for the attach_volume call --- nova/virt/xenapi.py | 94 +++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 4ed5d047f..ec5e7456a 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -63,6 +63,9 @@ from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from xml.dom.minidom import parseString + + XenAPI = None @@ -90,7 +93,7 @@ XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME + 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} @@ -126,7 +129,7 @@ class XenAPIConnection(object): def spawn(self, instance): vm = yield self._lookup(instance.name) if vm is not None: - raise Exception('Attempted to create non-unique name %s' % + raise Exception('Attempted to create non-unique name %s' % instance.name) network = db.project_get_network(None, instance.project_id) @@ -296,14 +299,34 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - + @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): - return True + # NOTE: No Resource Pool concept so far + logging.debug("Attach_volume: %s, %s, %s", + instance_name, device_path, mountpoint) + volume_info = _parse_volume_info(device_path, mountpoint) + # Create the iSCSI SR, and the PDB through which hosts access SRs. + # But first, retrieve target info, like Host, IQN, LUN and SCSIID + target = yield self._get_target(volume_info) + label = 'SR-%s' % volume_info['volumeId'] + sr_ref = yield self._create_sr(target, label) + # Create VDI and attach VBD to VM + vm = None + try: + task = yield self._call_xenapi('', vm) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - return True + logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Detach VBD from VM + # Forget SR/PDB info associated with host + # TODO: can we avoid destroying the SR every time we detach? 
+ yield True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) @@ -333,6 +356,52 @@ class XenAPIConnection(object): else: return vms[0] + @utils.deferredToThread + def _get_target(self, volume_info): + return self._get_target_blocking(volume_info) + + def _get_target_blocking(self, volume_info): + target = {} + target['target'] = volume_info['targetHost'] + target['port'] = volume_info['targetPort'] + target['targetIQN'] = volume_info['iqn'] + # We expect SR_BACKEND_FAILURE_107 to retrieve params to create the SR + try: + self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, '-1', '', '', + 'lvmoiscsi', '', False, {}) + except XenAPI.Failure, exc: + if exc.details[0] == 'SR_BACKEND_FAILURE_107': + xml_response = parseString(exc.details[3]) + isciTargets = xml_response.getElementsByTagName('iscsi-target') + # Make sure that only the correct Lun is visible + if len(isciTargets) > 1: + raise Exception('More than one ISCSI Target available') + isciLuns = isciTargets.item(0).getElementsByTagName('LUN') + if len(isciLuns) > 1: + raise Exception('More than one ISCSI Lun available') + # Parse params from the xml response into the dictionary + for n in isciLuns.item(0).childNodes: + if n.nodeType == 1: + target[n.nodeName] = str(n.firstChild.data).strip() + return target + + @utils.deferredToThread + def _create_sr(self, target, label): + return self._create_sr_blocking(target, label) + + def _create_sr_blocking(self, target, label): + # TODO: we might want to put all these string literals into constants + sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, + target['size'], + label, + '', + 'lvmoiscsi', + '', + True, {}) + return sr + def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" @@ -412,7 +481,18 @@ def _parse_xmlrpc_value(val): if not val: return val x = xmlrpclib.loads( - '' + - val + + '' + + val + '') return x[0][0] + + +def _parse_volume_info(device_path, mountpoint): + volume_info = {} + volume_info['volumeId'] = 'vol-qurmrzn9' + # XCP and XenServer add an x to the device name + volume_info['xenMountpoint'] = '/dev/xvdb' + volume_info['targetHost'] = '10.70.177.40' + volume_info['targetPort'] = '3260' # default 3260 + volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' + return volume_info -- cgit From 688d564668aefa4b644236421a3a45fc90486634 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:31:32 +0000 Subject: work on attach_volume, with a few things to iron out --- nova/virt/xenapi.py | 99 ++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 16 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 5bf98468e..d3167ebf3 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -147,7 +147,7 @@ class XenAPIConnection(object): vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) + yield self._create_vbd(vm_ref, vdi_ref, 0, True, True, False) if network_ref: yield self._create_vif(vm_ref, network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) @@ -197,7 +197,21 @@ class XenAPIConnection(object): defer.returnValue(vm_ref) @defer.inlineCallbacks - def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + vdi_rec = {} + vdi_rec['read_only'] = read_only + vdi_rec['SR'] = sr_ref + vdi_rec['virtual_size'] = str(size) + vdi_rec['name_label'] = label + vdi_rec['name_description'] = description + vdi_rec['sharable'] = sharable + vdi_rec['type'] = type + vdi_rec['other_config'] = {} + vdi_ref = yield self._call_xenapi('VDI.create', vdi_rec) + defer.returnValue(vdi_ref) + + @defer.inlineCallbacks + def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable, unpluggable, empty): """Create a VBD record. 
Returns a Deferred that gives the new VBD reference.""" @@ -208,8 +222,8 @@ class XenAPIConnection(object): vbd_rec['bootable'] = bootable vbd_rec['mode'] = 'RW' vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False + vbd_rec['unpluggable'] = unpluggable + vbd_rec['empty'] = empty vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} @@ -320,14 +334,33 @@ class XenAPIConnection(object): # But first, retrieve target info, like Host, IQN, LUN and SCSIID target = yield self._get_target(volume_info) label = 'SR-%s' % volume_info['volumeId'] - sr_ref = yield self._create_sr(target, label) + description = 'Attached-to:%s' % instance_name + # Create SR and check the physical space available for the VDI allocation + sr_ref = yield self._create_sr(target, label, description) + disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM - vm = None + vm_ref = yield self._lookup(instance_name) + logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) try: - task = yield self._call_xenapi('', vm) - yield self._wait_for_task(task) + vdi_ref = yield self._create_vdi(sr_ref, disk_size, + 'user', volume_info['volumeId'], '', + False, False) except Exception, exc: logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s' % sr_ref) + else: + try: + userdevice = 2 # FIXME: this depends on the numbers of attached disks + vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s' % sr_ref) yield True @defer.inlineCallbacks @@ -395,23 +428,57 @@ class XenAPIConnection(object): if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() return target + + @utils.deferredToThread + def _get_sr_available_space(self, sr_ref): + return self._get_sr_available_space_blocking(sr_ref) + + def _get_sr_available_space_blocking(self, sr_ref): + pu = self._conn.xenapi.SR.get_physical_utilisation(sr_ref) + ps = self._conn.xenapi.SR.get_physical_size(sr_ref) + return (int(ps) - int(pu)) - (8 * 1024 * 1024) @utils.deferredToThread - def _create_sr(self, target, label): - return self._create_sr_blocking(target, label) + def _create_sr(self, target, label, description): + return self._create_sr_blocking(target, label, description) - def _create_sr_blocking(self, target, label): + def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, - '', + description, 'lvmoiscsi', '', True, {}) - return sr + # TODO: there might be some timing issues here + self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + @defer.inlineCallbacks + def _destroy_sr(self, sr_ref): + # Some clean-up depending on the state of the SR + #yield self._destroy_vdbs(sr_ref) + #yield self._destroy_vdis(sr_ref) + # Destroy PDBs + pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) + for pbd_ref in pbds: + try: + task = yield self._call_xenapi('Async.PBD.unplug', pbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + else: + task = yield 
self._call_xenapi('Async.PBD.destroy', pbd_ref) + yield self._wait_for_task(task) + # Forget SR + try: + task = yield self._call_xenapi('Async.SR.forget', sr_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -524,8 +591,8 @@ def _parse_xmlrpc_value(val): def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' - # XCP and XenServer add an x to the device name - volume_info['xenMountpoint'] = '/dev/xvdb' + # Because XCP/XS want an x beforehand + volume_info['xenMountpoint'] = '/dev/xvdc' volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' -- cgit From a9b900d24020b68284e402a98ee28c107de0bd71 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:42:22 +0000 Subject: added attach_volume implementation --- nova/virt/xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index d3167ebf3..236360f11 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -593,7 +593,7 @@ def _parse_volume_info(device_path, mountpoint): volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '10.70.177.40' + volume_info['targetHost'] = '' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 04b1740c991d6d499364c21c2524c46ed5fc2522 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 29 Nov 2010 17:26:44 +0000 Subject: changes --- nova/virt/xenapi.py | 93 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 25 deletions(-) diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 236360f11..f2ba71306 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -198,6 +198,9 @@ class XenAPIConnection(object): @defer.inlineCallbacks def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + """Create a VDI record. 
Returns a Deferred that gives the new + VDI reference.""" + vdi_rec = {} vdi_rec['read_only'] = read_only vdi_rec['SR'] = sr_ref @@ -337,7 +340,8 @@ class XenAPIConnection(object): description = 'Attached-to:%s' % instance_name # Create SR and check the physical space available for the VDI allocation sr_ref = yield self._create_sr(target, label, description) - disk_size = yield self._get_sr_available_space(sr_ref) + disk_size = int(target['size']) + #disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM vm_ref = yield self._lookup(instance_name) logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) @@ -347,20 +351,30 @@ class XenAPIConnection(object): False, False) except Exception, exc: logging.warn(exc) - if sr_ref: - yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VDI on SR %s' % sr_ref) + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s for instance %s' + % (sr_ref, + instance_name)) else: - try: + try: userdevice = 2 # FIXME: this depends on the numbers of attached disks vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s for instance %s' + % (sr_ref, + instance_name)) + else: + try: + raise Exception('') + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VBD on SR %s' % sr_ref) + raise Exception('Unable to attach volume to instance %s' % instance_name) + yield True @defer.inlineCallbacks @@ -412,7 +426,7 @@ class XenAPIConnection(object): try: self._conn.xenapi.SR.create(self._get_xenapi_host(), target, '-1', '', '', - 'lvmoiscsi', '', False, {}) + 'iscsi', '', False, {}) except XenAPI.Failure, exc: if exc.details[0] == 'SR_BACKEND_FAILURE_107': xml_response = parseString(exc.details[3]) @@ -427,6 +441,9 @@ class XenAPIConnection(object): for n in isciLuns.item(0).childNodes: if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() + else: + logging.warn(exc) + raise Exception('Unable to access SR') return target @utils.deferredToThread @@ -444,24 +461,45 @@ class XenAPIConnection(object): def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.get_by_name_label(label) + if sr_ref is None: + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, description, - 'lvmoiscsi', + 'iscsi', '', True, {}) - # TODO: there might be some timing issues here - self._conn.xenapi.SR.scan(sr_ref) - return sr_ref + if sr_ref: + #self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + else: + raise Exception('Unable to create SR') @defer.inlineCallbacks def _destroy_sr(self, sr_ref): # Some clean-up depending on the state of the SR - #yield self._destroy_vdbs(sr_ref) - #yield self._destroy_vdis(sr_ref) - # Destroy PDBs + # Remove VBDs + #vbds = yield self._conn.xenapi.SR.get_VBDs(sr_ref) + #for vbd_ref in vbds: + # try: + # task = yield self._call_xenapi('Async.VBD.destroy', vbd_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + # 
Remove VDIs + #======================================================================= + # vdis = yield self._conn.xenapi.SR.get_VDIs(sr_ref) + # for vdi_ref in vdis: + # try: + # task = yield self._call_xenapi('Async.VDI.destroy', vdi_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + #======================================================================= + sr_rec = self._conn.xenapi.SR.get_record(sr_ref) + # Detach from host pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) for pbd_ref in pbds: try: @@ -469,16 +507,21 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - else: - task = yield self._call_xenapi('Async.PBD.destroy', pbd_ref) - yield self._wait_for_task(task) - # Forget SR + # Destroy SR try: - task = yield self._call_xenapi('Async.SR.forget', sr_ref) + task = yield self._call_xenapi('Async.SR.destroy', sr_ref) yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + def _sr_dispose_action(self, action, records): + for rec_ref in records: + try: + task = yield self._call_xenapi(action, rec_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -592,8 +635,8 @@ def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand - volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '' + volume_info['mountpoint'] = '/dev/xvdc' # translate + volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 40de074f44059f89caa15420a7174f63c76eec48 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 30 Nov 2010 19:03:13 +0000 Subject: iscsi volumes attach/detach complete. 
There is only one minor issue on how to discover targets from device_path --- nova/virt/xenapi/vm_utils.py | 38 +++++++ nova/virt/xenapi/volume_utils.py | 210 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volumeops.py | 93 +++++++++++------ 3 files changed, 313 insertions(+), 28 deletions(-) create mode 100644 nova/virt/xenapi/volume_utils.py diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b68df2791..6966e7b7b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -103,6 +103,44 @@ class VMHelper(): vdi_ref) defer.returnValue(vbd_ref) + @classmethod + @utils.deferredToThread + def find_vbd_by_number(self, session, vm_ref, number): + return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) + + @classmethod + def find_vbd_by_number_blocking(self, session, vm_ref, number): + vbds = session.get_xenapi().VM.get_VBDs(vm_ref) + if vbds: + for vbd in vbds: + try: + vbd_rec = session.get_xenapi().VBD.get_record(vbd) + if vbd_rec['userdevice'] == str(number): + return vbd + except Exception, exc: + logging.warn(exc) + raise Exception('VBD not found in instance %s' % vm_ref) + + @classmethod + @defer.inlineCallbacks + def unplug_vbd(self, session, vbd_ref): + try: + vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) + except Exception, exc: + logging.warn(exc) + if exc.details[0] != 'DEVICE_ALREADY_DETACHED': + raise Exception('Unable to unplug VBD %s' % vbd_ref) + + @classmethod + @defer.inlineCallbacks + def destroy_vbd(self, session, vbd_ref): + try: + task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) + yield session.wait_for_task(task) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to destroy VBD %s' % vbd_ref) + @classmethod @defer.inlineCallbacks def create_vif(self, session, vm_ref, network_ref, mac_address): diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py new file mode 100644 index 000000000..b982ac124 --- /dev/null +++ b/nova/virt/xenapi/volume_utils.py @@ -0,0 +1,210 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Helper methods for operations related to the management of volumes, +and storage repositories +""" + +import logging +import re +import string + +from twisted.internet import defer + +from nova import utils +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + + +class VolumeHelper(): + def __init__(self, session): + return + + @classmethod + @utils.deferredToThread + def create_iscsi_storage(self, session, target, port, target_iqn, + username, password, label, description): + + return VolumeHelper.create_iscsi_storage_blocking(session, target, + port, + target_iqn, + username, + password, + label, + description) + + @classmethod + def create_iscsi_storage_blocking(self, session, target, port, target_iqn, + username, password, label, description): + + sr_ref = session.get_xenapi().SR.get_by_name_label(label) + if len(sr_ref) == 0: + logging.debug('Introducing %s...' % label) + try: + sr_ref = session.get_xenapi().SR.create( + session.get_xenapi_host(), + {'target': target, + 'port': port, + 'targetIQN': target_iqn + # TODO: when/if chap authentication is used + #'chapuser': username, + #'chappassword': password + }, + '0', label, description, 'iscsi', '', False, {}) + logging.debug('Introduced %s as %s.' % (label, sr_ref)) + return sr_ref + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to create Storage Repository') + else: + return sr_ref[0] + + @classmethod + @defer.inlineCallbacks + def find_sr_from_vbd(self, session, vbd_ref): + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + + @classmethod + @utils.deferredToThread + def destroy_iscsi_storage(self, session, sr_ref): + VolumeHelper.destroy_iscsi_storage_blocking(session, sr_ref) + + @classmethod + def destroy_iscsi_storage_blocking(self, session, sr_ref): + logging.debug("Forgetting SR %s ... 
", sr_ref) + pbds = [] + try: + pbds = session.get_xenapi().SR.get_PBDs(sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when getting PBDs for %s', + exc, sr_ref) + for pbd in pbds: + try: + session.get_xenapi().PBD.unplug(pbd) + except Exception, exc: + logging.warn('Ignoring exception %s when unplugging PBD %s', + exc, pbd) + try: + session.get_xenapi().SR.forget(sr_ref) + logging.debug("Forgetting SR %s done.", sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when forgetting SR %s', + exc, sr_ref) + + @classmethod + @utils.deferredToThread + def introduce_vdi(self, session, sr_ref): + return VolumeHelper.introduce_vdi_blocking(session, sr_ref) + + @classmethod + def introduce_vdi_blocking(self, session, sr_ref): + try: + vdis = session.get_xenapi().SR.get_VDIs(sr_ref) + except Exception, exc: + raise Exception('Unable to introduce VDI on SR %s' % sr_ref) + try: + vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) + except Exception, exc: + raise Exception('Unable to get record of VDI %s on' % vdis[0]) + else: + return session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + volume_id = _get_volume_id(device_path) + target_host = _get_target_host(device_path) + target_port = _get_target_port(device_path) + target_iqn = _get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + +def _get_volume_id(n): + # FIXME: n must contain at least the volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + +def _get_target_host(n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + +def _get_target_port(n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + +def _get_iqn(n): + # FIXME: n must contain at least the volume_id + volume_id = _get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5aefa0611..d5c309240 100644 --- 
a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,6 +17,12 @@ """ Management class for Storage-related functions (attach, detach, etc). """ +import logging + +from twisted.internet import defer + +from volume_utils import VolumeHelper +from vm_utils import VMHelper class VolumeOps(object): @@ -25,58 +31,89 @@ class VolumeOps(object): @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): + # Before we start, check that the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - volume_info = _parse_volume_info(device_path, mountpoint) + vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID - target = yield self._get_target(volume_info) - label = 'SR-%s' % volume_info['volumeId'] - description = 'Attached-to:%s' % instance_name - # Create SR and check the physical space available for the VDI allocation - sr_ref = yield self._create_sr(target, label, description) - disk_size = int(target['size']) - #disk_size = yield self._get_sr_available_space(sr_ref) - # Create VDI and attach VBD to VM - vm_ref = yield self._lookup(instance_name) - logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) + label = 'SR-%s' % vol_rec['volumeId'] + description = 'Disk-for:%s' % instance_name + # Create SR + sr_ref = yield VolumeHelper.create_iscsi_storage(self._session, + vol_rec['targetHost'], + vol_rec['targetPort'], + vol_rec['targeIQN'], + '', # no CHAP auth + '', + label, + description) + # Introduce VDI and attach VBD to VM try: - vdi_ref = yield self._create_vdi(sr_ref, disk_size, - 'user', volume_info['volumeId'], '', - False, False) + vdi_ref = yield VolumeHelper.introduce_vdi(self._session, sr_ref) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VDI on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - userdevice = 2 # FIXME: this depends on the numbers of attached disks - vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + vbd_ref = yield VMHelper.create_vbd(self._session, + vm_ref, vdi_ref, + vol_rec['deviceNumber'], + False) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VBD on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - raise Exception('') - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) + #raise Exception('') + task = yield self._session.call_xenapi('Async.VBD.plug', + vbd_ref) + yield self._session.wait_for_task(task) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) - raise Exception('Unable to attach volume to instance %s' % instance_name) - + yield VolumeHelper.destroy_iscsi_storage(self._session, + sr_ref) + raise Exception('Unable to attach volume to instance %s' % + instance_name) yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Before we start, check that 
the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM - # Forget SR/PDB info associated with host - # TODO: can we avoid destroying the SR every time we detach? - yield True \ No newline at end of file + logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + try: + vbd_ref = yield VMHelper.find_vbd_by_number(self._session, + vm_ref, device_number) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to locate volume %s' % mountpoint) + else: + try: + sr_ref = yield VolumeHelper.find_sr_from_vbd(self._session, + vbd_ref) + yield VMHelper.unplug_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to detach volume %s' % mountpoint) + try: + yield VMHelper.destroy_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + # Forget SR + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) + yield True -- cgit From f26489ef1ad2a7df0e9f72a8c9ad4f2e3a65ae57 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 1 Dec 2010 12:02:02 +0000 Subject: minor refactoring --- nova/virt/xenapi/novadeps.py | 82 ++++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volume_utils.py | 77 ------------------------------------- nova/virt/xenapi/volumeops.py | 6 ++- 3 files changed, 86 insertions(+), 79 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index ba62468fb..db51982a6 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -14,6 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. 
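
The detach path above first resolves the mountpoint (e.g. /dev/sdc) to a Xen userdevice number before it can locate the VBD. Below is a minimal standalone sketch of that translation, mirroring the regex scheme used by the helper classes in these patches (Python 2, like the rest of the series); the sample mountpoints are illustrative assumptions only:

    import re
    import string

    def mountpoint_to_number(mountpoint):
        # Strip the /dev/ prefix, then map sdX/hdX/vdX or a bare number to a
        # device index, as the VolumeHelper/Volume helpers in this series do.
        if mountpoint.startswith('/dev/'):
            mountpoint = mountpoint[5:]
        if re.match('^[hs]d[a-p]$', mountpoint) or re.match('^vd[a-p]$', mountpoint):
            return ord(mountpoint[2:3]) - ord('a')
        elif re.match('^[0-9]+$', mountpoint):
            return string.atoi(mountpoint, 10)
        return -1  # untranslatable; callers treat this as an error

    assert mountpoint_to_number('/dev/sdc') == 2
    assert mountpoint_to_number('/dev/vdb') == 1
    assert mountpoint_to_number('3') == 3
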
+import re +import string + from nova import db from nova import flags from nova import process @@ -32,6 +35,15 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + class Instance(object): @@ -101,3 +113,73 @@ class User(object): @classmethod def get_secret(self, user): return user.secret + + +class Volume(object): + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = Volume.mountpoint_to_number(mountpoint) + volume_id = Volume.get_volume_id(device_path) + target_host = Volume.get_target_host(device_path) + target_port = Volume.get_target_port(device_path) + target_iqn = Volume.get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + @classmethod + def get_volume_id(self, n): + # FIXME: n must contain at least the volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + @classmethod + def get_target_host(self, n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + @classmethod + def get_target_port(self, n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + @classmethod + def get_iqn(self, n): + # FIXME: n must contain at least the volume_id + volume_id = Volume.get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index b982ac124..3b3e8894c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -20,20 +20,10 @@ and storage repositories """ import logging -import re -import string from twisted.internet import defer from nova import utils -from nova import flags - -FLAGS = flags.FLAGS - -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') -flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') class VolumeHelper(): @@ -141,70 +131,3 @@ class VolumeHelper(): 
vdi_rec['location'], vdi_rec['xenstore_data'], vdi_rec['sm_config']) - - @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint - device_number = VolumeHelper.mountpoint_to_number(mountpoint) - volume_id = _get_volume_id(device_path) - target_host = _get_target_host(device_path) - target_port = _get_target_port(device_path) - target_iqn = _get_iqn(device_path) - - if (device_number < 0) or \ - (volume_id is None) or \ - (target_host is None) or \ - (target_iqn is None): - raise Exception('Unable to obtain target information %s, %s' % - (device_path, mountpoint)) - - volume_info = {} - volume_info['deviceNumber'] = device_number - volume_info['volumeId'] = volume_id - volume_info['targetHost'] = target_host - volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn - return volume_info - - @classmethod - def mountpoint_to_number(self, mountpoint): - if mountpoint.startswith('/dev/'): - mountpoint = mountpoint[5:] - if re.match('^[hs]d[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^vd[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^[0-9]+$', mountpoint): - return string.atoi(mountpoint, 10) - else: - logging.warn('Mountpoint cannot be translated: %s', mountpoint) - return -1 - - -def _get_volume_id(n): - # FIXME: n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes - # see compute/manager->setup_compute_volume - volume_id = n[n.find('/vol-') + 1:] - if volume_id == n: - volume_id = n[n.find('-vol-') + 1:].replace('--', '-') - return volume_id - - -def _get_target_host(n): - # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host - - -def _get_target_port(n): - # FIXME: if n is none fall back on flags - return FLAGS.target_port - - -def _get_iqn(n): - # FIXME: n must contain at least the volume_id - volume_id = _get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index d5c309240..ec4343329 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -24,6 +24,8 @@ from twisted.internet import defer from volume_utils import VolumeHelper from vm_utils import VMHelper +from novadeps import Volume + class VolumeOps(object): def __init__(self, session): @@ -38,9 +40,9 @@ class VolumeOps(object): # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. 
# But first, retrieve target info, like Host, IQN, LUN and SCSIID + vol_rec = Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR @@ -95,7 +97,7 @@ class VolumeOps(object): raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) - device_number = VolumeHelper.mountpoint_to_number(mountpoint) + device_number = Volume.mountpoint_to_number(mountpoint) try: vbd_ref = yield VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) -- cgit From e4cfd7f3fe7d3c50d65c61abf21bf998fde85147 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 2 Dec 2010 14:09:23 +0000 Subject: minor refactoring after merge --- nova/virt/xenapi/novadeps.py | 28 ++++++++++++++++++++-------- nova/virt/xenapi_conn.py | 5 ++++- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index b4802764e..aa3535162 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -36,8 +36,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} -FLAGS = flags.FLAGS - flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' @@ -81,6 +79,21 @@ class Configuration(object): def xenapi_task_poll_interval(self): return self._flags.xenapi_task_poll_interval + @property + def target_host(self): + return self._flags.target_host + + @property + def target_port(self): + return self._flags.target_port + + @property + def iqn_prefix(self): + return self._flags.iqn_prefix + + +config = Configuration() + class Instance(object): @@ -206,18 +219,17 @@ class Volume(object): @classmethod def get_target_host(self, n): # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host + if n is None or config.target_host: + return config.target_host @classmethod def get_target_port(self, n): # FIXME: if n is none fall back on flags - return FLAGS.target_port + return config.target_port @classmethod def get_iqn(self, n): # FIXME: n must contain at least the volume_id volume_id = Volume.get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % (FLAGS.iqn_prefix, volume_id) - + if n is None or config.iqn_prefix: + return '%s:%s' % (config.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 948fade7e..d3f66b12c 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -44,7 +44,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :xenapi_task_poll_interval: The interval (seconds) used for polling of remote tasks (Async.VM.start, etc) (default: 0.5). - +:target_host: the iSCSI Target Host IP address, i.e. the IP + address for the nova-volume host +:target_port: iSCSI Target Port, 3260 Default +:iqn_prefix: IQN Prefix, e.g. 
'iqn.2010-10.org.openstack' """ import logging -- cgit From da010f311c07ee31d7d00ceb48d0f8656f1825ca Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 3 Dec 2010 00:01:21 +0000 Subject: * Removes unused schema * Removes MUST uid from novaUser * Changes isAdmin to isNovaAdmin * Adds two new configuration options: ** ldap_user_id_attribute, with a default of uid ** ldap_user_name_attribute, with a default of cn * ldapdriver.py has been modified to use these changes Rationale: Removing uid from novaUser: Requiring uid makes the schema very posix specific. Other schemas don't use uid for identifiers at all. This change makes the schema more interoperable. Changing isAdmin to isNovaAdmin: This attribute is too generic. It doesn't describe what the user is an admin of, and in a pre-existing directory is out of place. This change is to make the attribute more specific to the software. Adding config options for id and name: This is another interoperability change. This change makes the driver more compatible with directories like AD, where sAMAccountName is used instead of uid. Also, some directory admins prefer to use displayName rather than CN for full names of users. --- nova/auth/ldapdriver.py | 21 ++++++++++++--------- nova/auth/nova_openldap.schema | 26 +++----------------------- nova/auth/nova_sun.schema | 6 ++---- nova/auth/openssh-lpk_openldap.schema | 19 ------------------- nova/auth/openssh-lpk_sun.schema | 10 ---------- 5 files changed, 17 insertions(+), 65 deletions(-) delete mode 100644 nova/auth/openssh-lpk_openldap.schema delete mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index ceade1d65..e4c36c28d 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -37,6 +37,8 @@ flags.DEFINE_string('ldap_url', 'ldap://localhost', flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') +flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -131,12 +133,12 @@ class LdapDriver(object): 'inetOrgPerson', 'novaUser']), ('ou', [FLAGS.ldap_user_unit]), - ('uid', [name]), + (FLAGS.ldap_user_id_attribute, [name]), ('sn', [name]), - ('cn', [name]), + (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isAdmin', [str(is_admin).upper()]), + ('isNovaAdmin', [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -274,7 +276,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -450,11 +452,11 @@ class LdapDriver(object): if attr == None: return None return { - 'id': attr['uid'][0], - 'name': attr['cn'][0], + 'id': attr[FLAGS.ldap_user_id_attribute][0], + 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isAdmin'][0] == 'TRUE')} + 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} def 
__to_project(self, attr): """Convert ldap attributes to Project object""" @@ -474,9 +476,10 @@ class LdapDriver(object): return dn.split(',')[0].split('=')[1] @staticmethod - def __uid_to_dn(dn): + def __uid_to_dn(uid): """Convert uid to dn""" - return 'uid=%s,%s' % (dn, FLAGS.ldap_user_subtree) + return FLAGS.ldap_user_id_attribute + '=%s,%s' \ + % (uid, FLAGS.ldap_user_subtree) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 4047361de..9e528f58b 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -30,20 +30,10 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:3 - NAME 'keyFingerprint' - DESC 'Fingerprint of private key' - EQUALITY caseIgnoreMatch - SUBSTR caseIgnoreSubstringsMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 - SINGLE-VALUE - ) - attributetype ( novaAttrs:4 - NAME 'isAdmin' - DESC 'Is user an administrator?' + NAME 'isNovaAdmin' + DESC 'Is user an nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE @@ -61,17 +51,7 @@ objectClass ( NAME 'novaUser' DESC 'access and secret keys' AUXILIARY - MUST ( uid ) - MAY ( accessKey $ secretKey $ isAdmin ) - ) - -objectClass ( - novaOCs:2 - NAME 'novaKeyPair' - DESC 'Key pair for User' - SUP top - STRUCTURAL - MUST ( cn $ sshPublicKey $ keyFingerprint ) + MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClass ( diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index e925e05e4..decf10f06 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -8,9 +8,7 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) +attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' 
EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) ) +objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema deleted file mode 100644 index 93351da6d..000000000 --- a/nova/auth/openssh-lpk_openldap.schema +++ /dev/null @@ -1,19 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Based on the proposal of : Mark Ruijter -# - - -# octetString SYNTAX -attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' - DESC 'MANDATORY: OpenSSH Public key' - EQUALITY octetStringMatch - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) - -# printableString SYNTAX yes|no -objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY - DESC 'MANDATORY: OpenSSH LPK objectclass' - MAY ( sshPublicKey $ uid ) - ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema deleted file mode 100644 index 5f52db3b6..000000000 --- a/nova/auth/openssh-lpk_sun.schema +++ /dev/null @@ -1,10 +0,0 @@ -# -# LDAP Public Key Patch schema for use with openssh-ldappubkey -# Author: Eric AUGE -# -# Schema for Sun Directory Server. -# Based on the original schema, modified by Stefan Fischer. -# -dn: cn=schema -attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) -objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) -- cgit From ee71c0accbb540bcb9d08cdcdc8b659f29a0edd6 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:06:32 +0000 Subject: added interim solution for target discovery. Now info can either be passed via flags or discovered via iscsiadm. 
Long term solution is to add a few more fields to the db in the iscsi_target table with the necessary info and modify the iscsi driver to set them --- nova/virt/xenapi/novadeps.py | 89 ++++++++++++++++++++++++++++++++++--------- nova/virt/xenapi/volumeops.py | 2 +- 2 files changed, 71 insertions(+), 20 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 65576019e..66c8233b8 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -27,6 +27,9 @@ import string from nova import db from nova import flags from nova import context +from nova import process + +from twisted.internet import defer from nova.compute import power_state from nova.auth.manager import AuthManager @@ -193,15 +196,28 @@ class User(object): class Volume(object): + """ Wraps up volume specifics """ @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint + @defer.inlineCallbacks + def parse_volume_info(cls, device_path, mountpoint): + """ + Parse device_path and mountpoint as they can be used by XenAPI. + In particular, the mountpoint (e.g. /dev/sdc) must be translated + into a numeric literal. + FIXME: As for device_path, currently cannot be used as it is, + because it does not contain target information. As for interim + solution, target details are passed either via Flags or obtained + by iscsiadm. Long-term solution is to add a few more fields to the + db in the iscsi_target table with the necessary info and modify + the iscsi driver to set them. + """ device_number = Volume.mountpoint_to_number(mountpoint) volume_id = Volume.get_volume_id(device_path) - target_host = Volume.get_target_host(device_path) - target_port = Volume.get_target_port(device_path) - target_iqn = Volume.get_iqn(device_path) + (iscsi_name, iscsi_portal) = yield Volume.get_target(volume_id) + target_host = Volume.get_target_host(iscsi_portal) + target_port = Volume.get_target_port(iscsi_portal) + target_iqn = Volume.get_iqn(iscsi_name, volume_id) if (device_number < 0) or \ (volume_id is None) or \ @@ -216,10 +232,11 @@ class Volume(object): volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port volume_info['targeIQN'] = target_iqn - return volume_info + defer.returnValue(volume_info) @classmethod - def mountpoint_to_number(self, mountpoint): + def mountpoint_to_number(cls, mountpoint): + """ Translate a mountpoint like /dev/sdc into a numberic """ if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] if re.match('^[hs]d[a-p]$', mountpoint): @@ -233,8 +250,9 @@ class Volume(object): return -1 @classmethod - def get_volume_id(self, n): - # FIXME: n must contain at least the volume_id + def get_volume_id(cls, n): + """ Retrieve the volume id from device_path """ + # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes # see compute/manager->setup_compute_volume @@ -244,19 +262,52 @@ class Volume(object): return volume_id @classmethod - def get_target_host(self, n): - # FIXME: if n is none fall back on flags - if n is None or config.target_host: + def get_target_host(cls, n): + """ Retrieve target host """ + if n: + return n[0:n.find(':')] + elif n is None or config.target_host: return config.target_host @classmethod - def get_target_port(self, n): - # FIXME: if n is none fall back on flags - return config.target_port + def get_target_port(cls, n): + """ Retrieve target port """ + if n: + return n[n.find(':') + 1:] + elif n is None or 
config.target_port: + return config.target_port @classmethod - def get_iqn(self, n): - # FIXME: n must contain at least the volume_id - volume_id = Volume.get_volume_id(n) - if n is None or config.iqn_prefix: + def get_iqn(cls, n, id): + """ Retrieve target IQN """ + if n: + return n + elif n is None or config.iqn_prefix: + volume_id = Volume.get_volume_id(id) return '%s:%s' % (config.iqn_prefix, volume_id) + + @classmethod + @defer.inlineCallbacks + def get_target(self, volume_id): + """ + Gets iscsi name and portal from volume name and host. + For this method to work the following are needed: + 1) volume_ref['host'] to resolve the public IP address + 2) ietd to listen only to the public network interface + If any of the two are missing, fall back on Flags + """ + volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), + volume_id) + + (r, _e) = yield process.simple_execute("sudo iscsiadm -m discovery -t " + "sendtargets -p %s" % + volume_ref['host']) + if len(_e) == 0: + for target in r.splitlines(): + if volume_id in target: + (location, _sep, iscsi_name) = target.partition(" ") + break + iscsi_portal = location.split(",")[0] + defer.returnValue((iscsi_name, iscsi_portal)) + else: + defer.returnValue((None, None)) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c48f6491..a052aaf95 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -47,7 +47,7 @@ class VolumeOps(object): instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID - vol_rec = Volume.parse_volume_info(device_path, mountpoint) + vol_rec = yield Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR -- cgit From 06c52051005f5e43a1f543e2d1c5922aa91c7918 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:29:00 +0000 Subject: minor changes to docstrings --- nova/virt/xenapi/novadeps.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 66c8233b8..3680a5dd2 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -60,10 +60,15 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') -flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') +flags.DEFINE_string('target_host', + None, + 'iSCSI Target Host') +flags.DEFINE_string('target_port', + '3260', + 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', + 'iqn.2010-10.org.openstack', + 'IQN Prefix') class Configuration(object): @@ -292,9 +297,9 @@ class Volume(object): """ Gets iscsi name and portal from volume name and host. For this method to work the following are needed: - 1) volume_ref['host'] to resolve the public IP address - 2) ietd to listen only to the public network interface - If any of the two are missing, fall back on Flags + 1) volume_ref['host'] must resolve to something rather than loopback + 2) ietd must bind only to the address as resolved above + If any of the two conditions are not met, fall back on Flags. 
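
The interim target discovery above shells out to iscsiadm and parses its sendtargets output, which reports one '<ip>:<port>,<tpgt> <iqn>' pair per line. A small illustrative parser over a made-up discovery line follows; the address and IQN are the placeholder values already used earlier in this series, not a real target:

    def parse_sendtargets_line(line):
        # '<portal>,<tpgt> <iqn>' -> (host, port, iqn), as get_target() does above
        location, _sep, iscsi_name = line.partition(' ')
        portal = location.split(',')[0]
        host, _sep, port = portal.partition(':')
        return host, port, iscsi_name

    sample = '10.70.177.40:3260,1 iqn.2010-10.org.openstack:vol-qurmrzn9'
    print parse_sendtargets_line(sample)
    # -> ('10.70.177.40', '3260', 'iqn.2010-10.org.openstack:vol-qurmrzn9')
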
""" volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), volume_id) -- cgit From e9597d1370211de15ca96f1fa52fcbe3c9166a7e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:15:22 +0000 Subject: fixed pylint violations that slipped out from a previous check --- nova/virt/xenapi/vm_utils.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 762cbae83..2ee6737ab 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.virt import images from nova.compute import power_state +from nova.virt.xenapi.volume_utils import StorageError XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -115,11 +116,13 @@ class VMHelper(): @classmethod @utils.deferredToThread - def find_vbd_by_number(self, session, vm_ref, number): + def find_vbd_by_number(cls, session, vm_ref, number): + """ Get the VBD reference from the device number """ return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) @classmethod - def find_vbd_by_number_blocking(self, session, vm_ref, number): + def find_vbd_by_number_blocking(cls, session, vm_ref, number): + """ Synchronous find_vbd_by_number """ vbds = session.get_xenapi().VM.get_VBDs(vm_ref) if vbds: for vbd in vbds: @@ -127,29 +130,31 @@ class VMHelper(): vbd_rec = session.get_xenapi().VBD.get_record(vbd) if vbd_rec['userdevice'] == str(number): return vbd - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) raise Exception('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks - def unplug_vbd(self, session, vbd_ref): + def unplug_vbd(cls, session, vbd_ref): + """ Unplug VBD from VM """ try: vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': - raise Exception('Unable to unplug VBD %s' % vbd_ref) + raise StorageError('Unable to unplug VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks - def destroy_vbd(self, session, vbd_ref): + def destroy_vbd(cls, session, vbd_ref): + """ Destroy VBD from host database """ try: task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) yield session.wait_for_task(task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('Unable to destroy VBD %s' % vbd_ref) + raise StorageError('Unable to destroy VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks @@ -244,6 +249,7 @@ class VMHelper(): @classmethod def compile_info(cls, record): + """ Fill record with VM status information """ return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, -- cgit From 88777c09ad909c68da8d433800cae862e9bbff4a Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:26:38 +0000 Subject: and yet another pylint fix --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2ee6737ab..039e72981 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD 
not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks -- cgit From c0fc8a5e9e72ecb780258d9cf41b32973620eb4c Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 15:35:56 +0000 Subject: small fixes on Exception handling --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 10 +++++++--- nova/virt/xenapi/volumeops.py | 7 ++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 039e72981..f29803136 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise StorageError('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 48aff7ef5..debaa6906 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -95,9 +95,13 @@ class VolumeHelper(): @defer.inlineCallbacks def find_sr_from_vbd(cls, session, vbd_ref): """ Find the SR reference from the VBD reference """ - vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) - sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) - defer.returnValue(sr_ref) + try: + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + except XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to find SR from VBD %s' % vbd_ref) @classmethod @utils.deferredToThread diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 4055688e3..b9f260756 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -18,6 +18,7 @@ Management class for Storage-related functions (attach, detach, etc). """ import logging +import XenAPI from twisted.internet import defer @@ -68,10 +69,10 @@ class VolumeOps(object): vm_ref, vdi_ref, vol_rec['deviceNumber'], False) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - raise StorageError('Unable to use SR %s for instance %s' + raise Exception('Unable to use SR %s for instance %s' % (sr_ref, instance_name)) else: @@ -79,7 +80,7 @@ class VolumeOps(object): task = yield self._session.call_xenapi('Async.VBD.plug', vbd_ref) yield self._session.wait_for_task(task) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) -- cgit From 699ac0785240307ef2396d688e6c0a2acb446665 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 22:22:48 +0000 Subject: pylint fixes --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 35d89d835..0549dc9fb 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -44,7 +44,7 @@ class VMHelper(): """ The class that wraps the helper methods together. 
""" - def __init__(self, session): + def __init__(self): return @classmethod diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 84eb82f15..051d0fe85 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -1,5 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from orca.scripts import self_voicing # Copyright (c) 2010 Citrix Systems, Inc. # @@ -46,7 +45,7 @@ class VolumeHelper(): """ The class that wraps the helper methods together. """ - def __init__(self, session): + def __init__(self): return @classmethod -- cgit From 03920759ac485e76c9104b4c9a1bf53231e2c47c Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 10:22:29 +0000 Subject: Removing novaProject from the schema. This change may look odd at first; here's how it works: Both roles are projects are groupOfNames. Previously, we were differentiating projects from project roles by using the novaProject objectclass on the project, and not on the roles. This change removes novaProject, and uses the owner attribute instead of the projectManager attribute. Only projects should have an owner. We can differentiate projects from project roles by checking for the existence of this attribute. To check for the existence of an attribute in LDAP, a wildcard search is used. The fake LDAP driver did not support wildcard searches, so I put in "all or nothing" support for it. The wildcard search support doesn't work exactly like wildcard searches in LDAP, but will work for the case that's required. --- nova/auth/fakeldap.py | 3 +++ nova/auth/ldapdriver.py | 16 ++++++++-------- nova/auth/nova_openldap.schema | 16 ---------------- nova/auth/nova_sun.schema | 2 -- nova/auth/opendj.sh | 2 -- nova/auth/slap.sh | 4 +--- 6 files changed, 12 insertions(+), 31 deletions(-) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 46e0135b4..2dcb69267 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -119,6 +119,9 @@ def _match(key, value, attrs): """Match a given key and value against an attribute list.""" if key not in attrs: return False + # This is a wild card search. Implemented as all or nothing for now. 
+ if value == "*": + return True if key != "objectclass": return value in attrs[key] # it is an objectclass check, so check subclasses diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 871515663..705e89ee8 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -106,7 +106,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(objectclass=novaProject)') + attr = self.__find_object(dn, '(owner=*)') return self.__to_project(attr) def get_users(self): @@ -122,7 +122,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(objectclass=novaProject)' + pattern = '(owner=*)' if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -205,10 +205,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['novaProject']), + ('objectclass', ['groupOfNames']), ('cn', [name]), ('description', [description]), - ('projectManager', [manager_dn]), + ('owner', [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +224,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -286,7 +286,7 @@ class LdapDriver(object): project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) roles = self.__find_objects(project_dn, '(&(&(objectclass=groupOfNames)' - '(!(objectclass=novaProject)))' + '(!(owner=*)))' '(member=%s))' % self.__uid_to_dn(uid)) return [role['cn'][0] for role in roles] @@ -385,7 +385,7 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(objectclass=novaProject)))') + '(&(objectclass=groupOfNames)(!(owner=*)))') def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" @@ -534,7 +534,7 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['projectManager'][0]), + 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 9e528f58b..1a10a445d 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -39,13 +39,6 @@ attributetype ( SINGLE-VALUE ) -attributetype ( - novaAttrs:5 - NAME 'projectManager' - DESC 'Project Managers of a project' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 - ) - objectClass ( novaOCs:1 NAME 'novaUser' @@ -53,12 +46,3 @@ objectClass ( AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) - -objectClass ( - novaOCs:3 - NAME 'novaProject' - DESC 'Container for project' - SUP groupOfNames - STRUCTURAL - MUST ( cn $ projectManager ) - ) diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index decf10f06..1a04601b5 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -9,6 
+9,4 @@ dn: cn=schema attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isNovaAdmin' DESC 'Is user a nova administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE ) -attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MAY ( accessKey $ secretKey $ isNovaAdmin ) ) -objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) ) diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 8052c077d..9a9600342 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,9 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' -cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif -chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif cat >/etc/ldap/ldap.conf </dev/null; echo "$PWD"/"${0##*/}")"` -cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema -cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema +cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig cat >/etc/ldap/slapd.conf </etc/ldap/slapd.conf < Date: Wed, 8 Dec 2010 16:23:59 +0000 Subject: Adding support for choosing a schema version, so that users can more easily migrate from an old schema to the new schema. --- nova/auth/ldapdriver.py | 79 ++++++++++++++++++++++++------------------ nova/auth/nova_openldap.schema | 4 ++- nova/auth/nova_sun.schema | 5 +-- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 705e89ee8..21d8f8065 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,6 +32,8 @@ from nova import flags FLAGS = flags.FLAGS +flags.DEFINE_integer('ldap_schema_version', 1, + 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') @@ -75,10 +77,20 @@ class LdapDriver(object): Defines enter and exit and therefore supports the with/as syntax. 
""" + project_pattern = '(owner=*)' + isadmin_attribute = 'isNovaAdmin' + project_attribute = 'owner' + project_objectclass = 'groupOfNames' + def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') self.conn = None + if FLAGS.ldap_schema_version == 1: + LdapDriver.project_pattern = '(objectclass=novaProject)' + LdapDriver.isadmin_attribute = 'isAdmin' + LdapDriver.project_attribute = 'projectManager' + LdapDriver.project_objectclass = 'novaProject' def __enter__(self): """Creates the connection to LDAP""" @@ -106,7 +118,7 @@ class LdapDriver(object): """Retrieve project by id""" dn = 'cn=%s,%s' % (pid, FLAGS.ldap_project_subtree) - attr = self.__find_object(dn, '(owner=*)') + attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) def get_users(self): @@ -122,7 +134,7 @@ class LdapDriver(object): def get_projects(self, uid=None): """Retrieve list of projects""" - pattern = '(owner=*)' + pattern = LdapDriver.project_pattern if uid: pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid)) attrs = self.__find_objects(FLAGS.ldap_project_subtree, @@ -152,11 +164,11 @@ class LdapDriver(object): else: attr.append((self.ldap.MOD_ADD, 'accessKey', \ [access_key])) - if 'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', \ + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, 'isNovaAdmin', \ + attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) @@ -175,7 +187,7 @@ class LdapDriver(object): (FLAGS.ldap_user_name_attribute, [name]), ('secretKey', [secret_key]), ('accessKey', [access_key]), - ('isNovaAdmin', [str(is_admin).upper()]), + (LdapDriver.isadmin_attribute, [str(is_admin).upper()]), ] self.conn.add_s(self.__uid_to_dn(name), attr) return self.__to_user(dict(attr)) @@ -205,10 +217,10 @@ class LdapDriver(object): if not manager_dn in members: members.append(manager_dn) attr = [ - ('objectclass', ['groupOfNames']), + ('objectclass', [LdapDriver.project_objectclass]), ('cn', [name]), ('description', [description]), - ('owner', [manager_dn]), + (LdapDriver.project_attribute, [manager_dn]), ('member', members)] self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) return self.__to_project(dict(attr)) @@ -224,7 +236,7 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, 'owner', manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -284,10 +296,9 @@ class LdapDriver(object): return roles else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) - roles = self.__find_objects(project_dn, - '(&(&(objectclass=groupOfNames)' - '(!(owner=*)))' - '(member=%s))' % self.__uid_to_dn(uid)) + query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] def delete_user(self, uid): @@ -306,9 +317,9 @@ class LdapDriver(object): if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_DELETE, 'accessKey', \ user['accessKey'])) - if 
'isNovaAdmin' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'isNovaAdmin', \ - user['isNovaAdmin'])) + if LdapDriver.isadmin_attribute in user.keys(): + attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -330,7 +341,7 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, 'isNovaAdmin', str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -384,19 +395,20 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - return self.__find_dns(tree, - '(&(objectclass=groupOfNames)(!(owner=*)))') + query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" - dns = self.__find_dns(tree, - '(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + query = ('(&(objectclass=groupOfNames)(member=%s))' % + self.__uid_to_dn(uid)) + dns = self.__find_dns(tree, query) return dns def __group_exists(self, dn): """Check if group exists""" - return self.__find_object(dn, '(objectclass=groupOfNames)') is not None + query = '(objectclass=groupOfNames)' + return self.__find_object(dn, query) is not None @staticmethod def __role_to_dn(role, project_id=None): @@ -435,7 +447,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -447,10 +459,10 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "becuase the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if self.__is_in_group(uid, group_dn): raise exception.Duplicate("User %s is already a member of " "the group %s" % (uid, group_dn)) @@ -461,13 +473,13 @@ class LdapDriver(object): """Remove user from group""" if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % - (group_dn,)) + group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % (uid,)) + "group because the user doesn't exist" % uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % - (uid,)) + uid) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member( group_dn, uid) @@ -489,7 +501,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % (uid,)) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -516,13 
+528,13 @@ class LdapDriver(object): if attr is None: return None if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \ - and 'isNovaAdmin' in attr.keys()): + and LdapDriver.isadmin_attribute in attr.keys()): return { 'id': attr[FLAGS.ldap_user_id_attribute][0], 'name': attr[FLAGS.ldap_user_name_attribute][0], 'access': attr['accessKey'][0], 'secret': attr['secretKey'][0], - 'admin': (attr['isNovaAdmin'][0] == 'TRUE')} + 'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')} else: return None @@ -534,7 +546,8 @@ class LdapDriver(object): return { 'id': attr['cn'][0], 'name': attr['cn'][0], - 'project_manager_id': self.__dn_to_uid(attr['owner'][0]), + 'project_manager_id': + self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index 1a10a445d..daa3a8442 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -1,7 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 1a04601b5..8e9052ded 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -1,8 +1,9 @@ # # Person object for Nova # inetorgperson with extra attributes -# Author: Vishvananda Ishaya -# Modified for strict RFC 4512 compatibility by: Ryan Lane +# Schema version: 2 +# Authors: Vishvananda Ishaya +# Ryan Lane # # using internet experimental oid arc as per BP64 3.1 dn: cn=schema -- cgit From 5e79e5957a016c1f38fb2d126f710078a4b7f9a2 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:26:12 +0000 Subject: Setting the default schema version to the new schema --- nova/auth/ldapdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 21d8f8065..eac1db547 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -32,7 +32,7 @@ from nova import flags FLAGS = flags.FLAGS -flags.DEFINE_integer('ldap_schema_version', 1, +flags.DEFINE_integer('ldap_schema_version', 2, 'Current version of the LDAP schema') flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server') -- cgit From 55bc83b07abc8700c2b619be6be88b348f42a4d8 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 8 Dec 2010 16:38:35 +0000 Subject: PEP8 fixes --- nova/auth/ldapdriver.py | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index eac1db547..870262a15 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -40,7 +40,8 @@ flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password') flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user') flags.DEFINE_string('ldap_user_id_attribute', 'uid', 'Attribute to use as id') -flags.DEFINE_string('ldap_user_name_attribute', 'cn', 'Attribute to use as name') +flags.DEFINE_string('ldap_user_name_attribute', 'cn', + 'Attribute to use as name') flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users') flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users') @@ -153,23 +154,23 @@ class LdapDriver(object): # Malformed entries are useless, replace attributes found. 
attr = [] if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'secretKey', \ + attr.append((self.ldap.MOD_REPLACE, 'secretKey', [secret_key])) else: - attr.append((self.ldap.MOD_ADD, 'secretKey', \ + attr.append((self.ldap.MOD_ADD, 'secretKey', [secret_key])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_REPLACE, 'accessKey', \ + attr.append((self.ldap.MOD_REPLACE, 'accessKey', [access_key])) else: - attr.append((self.ldap.MOD_ADD, 'accessKey', \ + attr.append((self.ldap.MOD_ADD, 'accessKey', [access_key])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_REPLACE, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) else: - attr.append((self.ldap.MOD_ADD, LdapDriver.isadmin_attribute, \ - [str(is_admin).upper()])) + attr.append((self.ldap.MOD_ADD, + LdapDriver.isadmin_attribute, [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -236,7 +237,8 @@ class LdapDriver(object): "manager %s doesn't exist" % manager_uid) manager_dn = self.__uid_to_dn(manager_uid) - attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, + manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) self.conn.modify_s('cn=%s,%s' % (project_id, @@ -312,14 +314,15 @@ class LdapDriver(object): # Retrieve user by name user = self.__get_ldap_user(uid) if 'secretKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'secretKey', \ - user['secretKey'])) + attr.append((self.ldap.MOD_DELETE, 'secretKey', + user['secretKey'])) if 'accessKey' in user.keys(): - attr.append((self.ldap.MOD_DELETE, 'accessKey', \ - user['accessKey'])) + attr.append((self.ldap.MOD_DELETE, 'accessKey', + user['accessKey'])) if LdapDriver.isadmin_attribute in user.keys(): - attr.append((self.ldap.MOD_DELETE, LdapDriver.isadmin_attribute, \ - user[LdapDriver.isadmin_attribute])) + attr.append((self.ldap.MOD_DELETE, + LdapDriver.isadmin_attribute, + user[LdapDriver.isadmin_attribute])) self.conn.modify_s(self.__uid_to_dn(uid), attr) else: # Delete entry @@ -341,7 +344,8 @@ class LdapDriver(object): if secret_key: attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key)) if admin is not None: - attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, str(admin).upper())) + attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute, + str(admin).upper())) self.conn.modify_s(self.__uid_to_dn(uid), attr) def __user_exists(self, uid): @@ -395,7 +399,8 @@ class LdapDriver(object): def __find_role_dns(self, tree): """Find dns of role objects in given tree""" - query = '(&(objectclass=groupOfNames)(!%s))' % LdapDriver.project_pattern + query = ('(&(objectclass=groupOfNames)(!%s))' % + LdapDriver.project_pattern) return self.__find_dns(tree, query) def __find_group_dns_with_member(self, tree, uid): -- cgit From 63006a18701ff185e6837aa2b88f001052643460 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 8 Dec 2010 18:49:28 +0000 Subject: typo fix --- nova/virt/xenapi/volume_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 051d0fe85..d247066aa 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -78,14 +78,14 @@ class VolumeHelper(): if 'chapuser' in info 
and 'chappassword' in info: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'], + 'targetIQN': info['targetIQN'], 'chapuser': info['chapuser'], 'chappassword': info['chappassword'] } else: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'] + 'targetIQN': info['targetIQN'] } try: sr_ref = session.get_xenapi().SR.create( @@ -211,7 +211,7 @@ class VolumeHelper(): volume_info['volumeId'] = volume_id volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn + volume_info['targetIQN'] = target_iqn defer.returnValue(volume_info) @classmethod -- cgit From 2a5ad56319dfdf75bf2eab1337032f035822f272 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Fri, 10 Dec 2010 18:37:17 +0000 Subject: There is always the odd change that one forgets! --- nova/virt/xenapi_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index b82862764..4ace6da14 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -65,7 +65,7 @@ from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS flags.DEFINE_boolean('xenapi_use_fake_session', - True, + False, 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, -- cgit From 8d08206cb4759328e7cf3b836eeff824e0d22052 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Fri, 10 Dec 2010 18:49:54 +0000 Subject: Format fixes and modification of Vish's email address. --- nova/auth/ldapdriver.py | 41 ++++++++++++++++++++++------------------- nova/auth/nova_openldap.schema | 2 +- nova/auth/nova_sun.schema | 2 +- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 870262a15..1b928e7d8 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -125,7 +125,7 @@ class LdapDriver(object): def get_users(self): """Retrieve list of users""" attrs = self.__find_objects(FLAGS.ldap_user_subtree, - '(objectclass=novaUser)') + '(objectclass=novaUser)') users = [] for attr in attrs: user = self.__to_user(attr) @@ -155,22 +155,24 @@ class LdapDriver(object): attr = [] if 'secretKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'secretKey', - [secret_key])) + [secret_key])) else: attr.append((self.ldap.MOD_ADD, 'secretKey', - [secret_key])) + [secret_key])) if 'accessKey' in user.keys(): attr.append((self.ldap.MOD_REPLACE, 'accessKey', - [access_key])) + [access_key])) else: attr.append((self.ldap.MOD_ADD, 'accessKey', - [access_key])) + [access_key])) if LdapDriver.isadmin_attribute in user.keys(): attr.append((self.ldap.MOD_REPLACE, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) else: attr.append((self.ldap.MOD_ADD, - LdapDriver.isadmin_attribute, [str(is_admin).upper()])) + LdapDriver.isadmin_attribute, + [str(is_admin).upper()])) self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: @@ -299,7 +301,7 @@ class LdapDriver(object): else: project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % - (LdapDriver.project_pattern, self.__uid_to_dn(uid))) + (LdapDriver.project_pattern, self.__uid_to_dn(uid))) roles = self.__find_objects(project_dn, query) return [role['cn'][0] for role in roles] @@ -363,7 +365,7 @@ class LdapDriver(object): def 
__get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') + '(objectclass=novaUser)') return attr def __find_object(self, dn, query=None, scope=None): @@ -406,7 +408,7 @@ class LdapDriver(object): def __find_group_dns_with_member(self, tree, uid): """Find dns of group objects in a given tree that contain member""" query = ('(&(objectclass=groupOfNames)(member=%s))' % - self.__uid_to_dn(uid)) + self.__uid_to_dn(uid)) dns = self.__find_dns(tree, query) return dns @@ -436,7 +438,8 @@ class LdapDriver(object): for member_uid in member_uids: if not self.__user_exists(member_uid): raise exception.NotFound("Group can't be created " - "because user %s doesn't exist" % member_uid) + "because user %s doesn't exist" % + member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -452,7 +455,7 @@ class LdapDriver(object): """Check if user is in group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be searched in group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -464,7 +467,7 @@ class LdapDriver(object): """Add user to group""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be added to the group " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) if not self.__group_exists(group_dn): raise exception.NotFound("The group at dn %s doesn't exist" % group_dn) @@ -481,13 +484,13 @@ class LdapDriver(object): group_dn) if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from the " - "group because the user doesn't exist" % uid) + "group because the user doesn't exist" % + uid) if not self.__is_in_group(uid, group_dn): raise exception.NotFound("User %s is not a member of the group" % uid) # NOTE(vish): remove user from group and any sub_groups - sub_dns = self.__find_group_dns_with_member( - group_dn, uid) + sub_dns = self.__find_group_dns_with_member(group_dn, uid) for sub_dn in sub_dns: self.__safe_remove_from_group(uid, sub_dn) @@ -506,7 +509,7 @@ class LdapDriver(object): """Remove user from all roles and projects""" if not self.__user_exists(uid): raise exception.NotFound("User %s can't be removed from all " - "because the user doesn't exist" % uid) + "because the user doesn't exist" % uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -564,8 +567,8 @@ class LdapDriver(object): @staticmethod def __uid_to_dn(uid): """Convert uid to dn""" - return FLAGS.ldap_user_id_attribute + '=%s,%s' \ - % (uid, FLAGS.ldap_user_subtree) + return (FLAGS.ldap_user_id_attribute + '=%s,%s' + % (uid, FLAGS.ldap_user_subtree)) class FakeLdapDriver(LdapDriver): diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema index daa3a8442..539a5c42d 100644 --- a/nova/auth/nova_openldap.schema +++ b/nova/auth/nova_openldap.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema index 8e9052ded..4a6a78839 100644 --- a/nova/auth/nova_sun.schema +++ b/nova/auth/nova_sun.schema @@ -2,7 +2,7 @@ # Person object for Nova # inetorgperson with extra attributes # Schema version: 2 -# Authors: 
Vishvananda Ishaya +# Authors: Vishvananda Ishaya # Ryan Lane # # using internet experimental oid arc as per BP64 3.1 -- cgit From 669d5f5612840c9ed6449d91ee5aae97842cac72 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:22:56 +0000 Subject: second round for unit testing framework --- nova/tests/virt_unittest.py | 65 +++++++++++++++++++++++++++----------- nova/virt/xenapi/__init__.py | 5 +++ nova/virt/xenapi/fake.py | 75 ++++++++++++++++++++++++++++++++++++++++++-- nova/virt/xenapi/vmops.py | 3 +- nova/virt/xenapi_conn.py | 2 +- 5 files changed, 127 insertions(+), 23 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index ba3fba83b..611022632 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -30,6 +30,9 @@ from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +# Those are XenAPI related +flags.DECLARE('target_host', 'nova.virt.xenapi_conn') +FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -270,36 +273,62 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.helper = volume_utils.VolumeHelper self.helper.late_import() + def _create_volume(self, size='0'): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['host'] = 'localhost' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(context.get_admin_context(), vol) + def test_create_iscsi_storage_raise_no_exception(self): - info = self.helper.parse_volume_info(None, None) - label = 'SR-' - description = '' - self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) + vol = self._create_volume() + info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs + description = 'Test-SR' + self.session.fail_next_call = False + sr_ref = self.helper.create_iscsi_storage_blocking(self.session, + info, + label, + description) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) + db.volume_destroy(context.get_admin_context(), vol['id']) def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - info = self.helper.parse_volume_info(None, None) - label = None + vol = self._create_volume() + info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + label = None # For testing new SRs description = None - self.assertFailure(self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description), - StorageError) + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.create_iscsi_storage_blocking, + self.session, + info, + label, + description) def test_find_sr_from_vbd_raise_no_exception(self): - pass + sr_ref = yield self.helper.find_sr_from_vbd(self.session, + self.session.VBD.FAKE_REF) + self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - def test_destroy_iscsi_storage_raise_no_exception(self): + def test_destroy_iscsi_storage(self): pass def test_introduce_vdi_raise_no_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.introduce_vdi_blocking(self.session, sr_ref) def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.session.fail_next_call = True + self.assertRaises(volume_utils.StorageError, + self.helper.introduce_vdi_blocking, + self.session, sr_ref) 
def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 3d598c463..d9abe54c5 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -13,3 +13,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +""" +:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI +================================================================== +""" diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 75ff587e1..3a01f9c3d 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,4 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +from twisted.web.domhelpers import _get +from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc. # @@ -33,15 +35,15 @@ class Failure(Exception): class FakeXenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self): - pass + self.fail_next_call = False def get_xenapi(self): """ Return the xenapi object """ - raise NotImplementedError() + return self def get_xenapi_host(self): """ Return the xenapi host """ - raise NotImplementedError() + return 'FAKE_XENAPI_HOST' def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns @@ -57,3 +59,70 @@ class FakeXenAPISession(object): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" raise NotImplementedError() + + def __getattr__(self, name): + return FakeXenAPIObject(name, self) + + +class FakeXenAPIObject(object): + def __init__(self, name, session): + self.name = name + self.session = session + self.FAKE_REF = 'FAKE_REFERENCE_%s' % name + + def get_by_name_label(self, label): + if label is None: + return '' # 'No object found' + else: + return 'FAKE_OBJECT_%s_%s' % (self.name, label) + + def getter(self, *args): + self._check_fail() + return self.FAKE_REF + + def ref_list(self, *args): + self._check_fail() + return [FakeXenAPIRecord()] + + def __getattr__(self, name): + if name == 'create': + return self._create + elif name == 'get_record': + return self._record + elif name == 'introduce': + return self._introduce + elif name.startswith('get_'): + getter = 'get_%s' % self.name + if name == getter: + return self.getter + else: + child = name[name.find('_') + 1:] + if child.endswith('s'): + return FakeXenAPIObject(child[:-1], self.session).ref_list + else: + return FakeXenAPIObject(child, self.session).getter + + def _create(self, *args): + self._check_fail() + return self.FAKE_REF + + def _record(self, *args): + self._check_fail() + return FakeXenAPIRecord() + + def _introduce(self, *args): + self._check_fail() + pass + + def _check_fail(self): + if self.session.fail_next_call: + self.session.fail_next_call = False # Reset! 
+ raise Failure('Unable to create %s' % self.name) + + +class FakeXenAPIRecord(dict): + def __init__(self): + pass + + def __getitem__(self, attr): + return '' -- cgit From fe667352c3e25c744a989ca45f4f9ed472778ae3 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:43:24 +0000 Subject: removing imports that should not have been there --- nova/tests/virt_unittest.py | 3 ++- nova/virt/xenapi/fake.py | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 611022632..1095662c3 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -317,7 +317,8 @@ class XenAPIVolumeTestCase(test.TrialTestCase): self.assertEqual(sr_ref, self.session.SR.FAKE_REF) def test_destroy_iscsi_storage(self): - pass + sr_ref = self.session.SR.FAKE_REF + self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 3a01f9c3d..2fed28609 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,6 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from twisted.web.domhelpers import _get -from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc.
# @@ -89,8 +87,10 @@ class FakeXenAPIObject(object): return self._create elif name == 'get_record': return self._record - elif name == 'introduce': - return self._introduce + elif name == 'introduce' or\ + name == 'forget' or\ + name == 'unplug': + return self._fake_action elif name.startswith('get_'): getter = 'get_%s' % self.name if name == getter: @@ -110,7 +110,7 @@ class FakeXenAPIObject(object): self._check_fail() return FakeXenAPIRecord() - def _introduce(self, *args): + def _fake_action(self, *args): self._check_fail() pass -- cgit From e30801445f8b543d78494ca63be60f85b94d3a53 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 20:31:33 +0000 Subject: moving xenapi unittests changes into another branch --- nova/tests/virt_unittest.py | 76 -------------------------- nova/virt/xenapi/fake.py | 128 -------------------------------------------- 2 files changed, 204 deletions(-) delete mode 100644 nova/virt/xenapi/fake.py diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 1095662c3..d49383fb7 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,14 +25,9 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') -# Those are XenAPI related -flags.DECLARE('target_host', 'nova.virt.xenapi_conn') -FLAGS.target_host = '127.0.0.1' class LibvirtConnTestCase(test.TrialTestCase): @@ -262,74 +257,3 @@ class NWFilterTestCase(test.TrialTestCase): d.addCallback(lambda _: self.teardown_security_group()) return d - - -class XenAPIVolumeTestCase(test.TrialTestCase): - - def setUp(self): - super(XenAPIVolumeTestCase, self).setUp() - self.flags(xenapi_use_fake_session=True) - self.session = fake.FakeXenAPISession() - self.helper = volume_utils.VolumeHelper - self.helper.late_import() - - def _create_volume(self, size='0'): - """Create a volume object.""" - vol = {} - vol['size'] = size - vol['user_id'] = 'fake' - vol['project_id'] = 'fake' - vol['host'] = 'localhost' - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - return db.volume_create(context.get_admin_context(), vol) - - def test_create_iscsi_storage_raise_no_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = 'Test-SR' - self.session.fail_next_call = False - sr_ref = self.helper.create_iscsi_storage_blocking(self.session, - info, - label, - description) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - db.volume_destroy(context.get_admin_context(), vol['id']) - - def test_create_iscsi_storage_raise_unable_to_create_sr_exception(self): - vol = self._create_volume() - info = yield self.helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') - label = None # For testing new SRs - description = None - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.create_iscsi_storage_blocking, - self.session, - info, - label, - description) - - def test_find_sr_from_vbd_raise_no_exception(self): - sr_ref = yield self.helper.find_sr_from_vbd(self.session, - self.session.VBD.FAKE_REF) - self.assertEqual(sr_ref, self.session.SR.FAKE_REF) - - def test_destroy_iscsi_storage(self): - sr_ref = self.session.SR.FAKE_REF - 
self.helper.destroy_iscsi_storage_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_no_exception(self): - sr_ref = self.session.SR.FAKE_REF - self.helper.introduce_vdi_blocking(self.session, sr_ref) - - def test_introduce_vdi_raise_unable_get_vdi_record_exception(self): - sr_ref = self.session.SR.FAKE_REF - self.session.fail_next_call = True - self.assertRaises(volume_utils.StorageError, - self.helper.introduce_vdi_blocking, - self.session, sr_ref) - - def tearDown(self): - super(XenAPIVolumeTestCase, self).tearDown() diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py deleted file mode 100644 index 2fed28609..000000000 --- a/nova/virt/xenapi/fake.py +++ /dev/null @@ -1,128 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A fake XenAPI SDK. - -Allows for xenapi helper classes testing. -""" - - -class Failure(Exception): - def __init__(self, message=None): - super(Failure, self).__init__(message) - self.details = [] - - def __str__(self): - return 'Fake XenAPI Exception' - - -class FakeXenAPISession(object): - """ The session to invoke XenAPI SDK calls """ - def __init__(self): - self.fail_next_call = False - - def get_xenapi(self): - """ Return the xenapi object """ - return self - - def get_xenapi_host(self): - """ Return the xenapi host """ - return 'FAKE_XENAPI_HOST' - - def call_xenapi(self, method, *args): - """Call the specified XenAPI method on a background thread. Returns - a Deferred for the result.""" - raise NotImplementedError() - - def async_call_plugin(self, plugin, fn, args): - """Call Async.host.call_plugin on a background thread. Returns a - Deferred with the task reference.""" - raise NotImplementedError() - - def wait_for_task(self, task): - """Return a Deferred that will give the result of the given task. 
- The task is polled until it completes.""" - raise NotImplementedError() - - def __getattr__(self, name): - return FakeXenAPIObject(name, self) - - -class FakeXenAPIObject(object): - def __init__(self, name, session): - self.name = name - self.session = session - self.FAKE_REF = 'FAKE_REFERENCE_%s' % name - - def get_by_name_label(self, label): - if label is None: - return '' # 'No object found' - else: - return 'FAKE_OBJECT_%s_%s' % (self.name, label) - - def getter(self, *args): - self._check_fail() - return self.FAKE_REF - - def ref_list(self, *args): - self._check_fail() - return [FakeXenAPIRecord()] - - def __getattr__(self, name): - if name == 'create': - return self._create - elif name == 'get_record': - return self._record - elif name == 'introduce' or\ - name == 'forget' or\ - name == 'unplug': - return self._fake_action - elif name.startswith('get_'): - getter = 'get_%s' % self.name - if name == getter: - return self.getter - else: - child = name[name.find('_') + 1:] - if child.endswith('s'): - return FakeXenAPIObject(child[:-1], self.session).ref_list - else: - return FakeXenAPIObject(child, self.session).getter - - def _create(self, *args): - self._check_fail() - return self.FAKE_REF - - def _record(self, *args): - self._check_fail() - return FakeXenAPIRecord() - - def _fake_action(self, *args): - self._check_fail() - pass - - def _check_fail(self): - if self.session.fail_next_call: - self.session.fail_next_call = False # Reset! - raise Failure('Unable to create %s' % self.name) - - -class FakeXenAPIRecord(dict): - def __init__(self): - pass - - def __getitem__(self, attr): - return '' -- cgit From 6e37cf42d758b5040442d9c296b21955d10a7327 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 14 Dec 2010 11:48:29 +0000 Subject: final cleanup, after moving unittest work into another branch --- nova/virt/xenapi/__init__.py | 30 ++++++++++++++++++ nova/virt/xenapi/fake.py | 66 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/network_utils.py | 3 +- nova/virt/xenapi/vm_utils.py | 19 ++--------- nova/virt/xenapi/vmops.py | 6 ++-- nova/virt/xenapi/volume_utils.py | 20 ++---------- nova/virt/xenapi/volumeops.py | 8 +++-- nova/virt/xenapi_conn.py | 10 ++++-- 8 files changed, 118 insertions(+), 44 deletions(-) create mode 100644 nova/virt/xenapi/fake.py diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index d9abe54c5..1a2903b98 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -18,3 +18,33 @@ :mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI ================================================================== """ + + +def load_sdk(flags): + """ + This method is used for loading the XenAPI SDK (fake or real) + """ + xenapi_module = \ + flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' + from_list = \ + flags.xenapi_use_fake_session and ['fake'] or [] + + return __import__(xenapi_module, globals(), locals(), from_list, -1) + + +class HelperBase(): + """ + The class that wraps the helper methods together. + """ + XenAPI = None + + def __init__(self): + return + + @classmethod + def late_import(cls, FLAGS): + """ + Load XenAPI module in for helper class + """ + if cls.XenAPI is None: + cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py new file mode 100644 index 000000000..8d6a39a87 --- /dev/null +++ b/nova/virt/xenapi/fake.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A fake XenAPI SDK. + +Allows for xenapi helper classes testing. +""" + + +class Failure(Exception): + def __init__(self, message=None): + super(Failure, self).__init__(message) + self.details = [] + + def __str__(self): + return 'Fake XenAPI Exception' + + +class FakeSession(object): + """ + The session to invoke XenAPI SDK calls. + FIXME(armando): this is a placeholder + for the xenapi unittests branch. + """ + def __init__(self, url): + pass + + def get_xenapi(self): + """ Return the xenapi object """ + raise NotImplementedError() + + def get_xenapi_host(self): + """ Return the xenapi host """ + raise NotImplementedError() + + def call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + raise NotImplementedError() + + def async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. Returns a + Deferred with the task reference.""" + raise NotImplementedError() + + def wait_for_task(self, task): + """Return a Deferred that will give the result of the given task. + The task is polled until it completes.""" + raise NotImplementedError() + + def __getattr__(self, name): + raise NotImplementedError() diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..cb745cdaf 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -21,9 +21,10 @@ their lookup functions. """ from twisted.internet import defer +from nova.virt.xenapi import HelperBase -class NetworkHelper(): +class NetworkHelper(HelperBase): """ The class that wraps the helper methods together. """ diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 89831fe5d..2b0691c01 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -32,6 +32,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from nova.virt.xenapi import HelperBase from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS @@ -44,29 +45,13 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} -class VMHelper(): +class VMHelper(HelperBase): """ The class that wraps the helper methods together. 
""" - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 9a8db0ad4..c79245972 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,9 +24,11 @@ from twisted.internet import defer from nova import db from nova import context +from nova import flags from nova import exception from nova.auth.manager import AuthManager +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -36,10 +38,10 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper class - VMHelper.late_import() + VMHelper.late_import(flags.FLAGS) def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 05143ed91..4482e465c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -30,7 +30,7 @@ from nova import context from nova import flags from nova import process from nova import utils - +from nova.virt.xenapi import HelperBase FLAGS = flags.FLAGS @@ -41,29 +41,13 @@ class StorageError(Exception): super(StorageError, self).__init__(message) -class VolumeHelper(): +class VolumeHelper(HelperBase): """ The class that wraps the helper methods together. 
""" - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @utils.deferredToThread def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index b1afdc811..1b337a6ed 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -21,6 +21,8 @@ import logging from twisted.internet import defer +from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -31,11 +33,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import() - VMHelper.late_import() + VolumeHelper.late_import(flags.FLAGS) + VMHelper.late_import(flags.FLAGS) @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index d8d21e24d..649d5dd04 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -59,6 +59,7 @@ from twisted.internet import reactor from nova import utils from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps @@ -91,7 +92,7 @@ flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') flags.DEFINE_string('iqn_prefix', - 'iqn.2010-12.org.openstack', + 'iqn.2010-10.org.openstack', 'IQN Prefix') @@ -156,8 +157,11 @@ class XenAPISession(object): def __init__(self, url, user, pw): # This is loaded late so that there's no need to install this # library when not using XenAPI. - self.XenAPI = __import__('XenAPI') - self._session = self.XenAPI.Session(url) + self.XenAPI = load_sdk(FLAGS) + if FLAGS.xenapi_use_fake_session: + self._session = self.XenAPI.FakeSession(url) + else: + self._session = self.XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): -- cgit From dada56794679b213b2d80e4e1f907a212b73f54e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 15 Dec 2010 17:50:05 +0000 Subject: * code cleanup * revised unittest approach * added stubout and a number of tests --- nova/tests/xenapi_unittest.py | 136 ++++++++++++++++++++++++++++++++++++++---- nova/virt/xenapi/__init__.py | 22 +------ nova/virt/xenapi/fake.py | 40 +------------ nova/virt/xenapi/vmops.py | 6 +- nova/virt/xenapi/volumeops.py | 7 +-- nova/virt/xenapi_conn.py | 24 ++++---- 6 files changed, 146 insertions(+), 89 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c9be17327..b9955a946 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -31,7 +31,7 @@ # under the License. 
-import mox +import stubout import uuid from twisted.internet import defer @@ -51,20 +51,34 @@ from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volumeops +from boto.ec2.volume import Volume FLAGS = flags.FLAGS +def stubout_session(stubs, cls): + def fake_import(self): + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + class XenAPIVolumeTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fake.reset() def _create_volume(self, size='0'): """Create a volume object.""" @@ -78,11 +92,12 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vol['attach_status'] = "detached" return db.volume_create(context.get_admin_context(), vol) - def test_create_iscsi_storage_raise_no_exception(self): - fake.reset() + def test_create_iscsi_storage(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper - helper.late_import(FLAGS) + helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] @@ -93,8 +108,25 @@ class XenAPIVolumeTestCase(test.TrialTestCase): description) db.volume_destroy(context.get_admin_context(), vol['id']) + def test_parse_volume_info_raise_exception(self): + """ This shows how to test helper classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) + session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') + helper = volume_utils.VolumeHelper + helper.XenAPI = session.get_imported_xenapi() + vol = self._create_volume() + # oops, wrong mount point! 
+ info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') + + def check(exc): + self.assertIsInstance(exc.value, volume_utils.StorageError) + + info.addErrback(check) + db.volume_destroy(context.get_admin_context(), vol['id']) + def test_attach_volume(self): - fake.reset() + """ This shows how to test Ops classes' methods """ + stubout_session(self.stubs, FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, @@ -116,13 +148,34 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result.addCallback(check) return result + def test_attach_volume_raise_exception(self): + """ This shows how to test when exceptions are raised """ + stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + conn = xenapi_conn.get_connection(False) + volume = self._create_volume() + instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, + 'm1.large', 'aa:bb:cc:dd:ee:ff') + fake.create_vm(instance.name, 'Running') + result = conn.attach_volume(instance.name, volume['ec2_id'], + '/dev/sdc') + + def check(exc): + if exc: + pass + else: + self.fail('Oops, no exception has been raised!') + + result.addErrback(check) + return result + def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() + self.stubs.UnsetAll() class XenAPIVMTestCase(test.TrialTestCase): """ - This uses Ewan's fake session approach + Unit tests for VM operations """ def setUp(self): super(XenAPIVMTestCase, self).setUp() @@ -131,19 +184,20 @@ class XenAPIVMTestCase(test.TrialTestCase): admin=True) self.project = self.manager.create_project('fake', 'fake', 'fake') self.network = utils.import_object(FLAGS.network_manager) - FLAGS.xenapi_use_fake_session = True + self.stubs = stubout.StubOutForTesting() FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) - #test_list_instances_0.skip = "E" def test_spawn(self): + stubout_session(self.stubs, FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, 'm1.large', 'aa:bb:cc:dd:ee:ff') @@ -186,6 +240,7 @@ class XenAPIVMTestCase(test.TrialTestCase): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + self.stubs.UnsetAll() class FakeInstance(): @@ -199,3 +254,64 @@ class FakeInstance(): self.ramdisk_id = ramdisk_id self.instance_type = instance_type self.mac_address = mac_address + + +class FakeSessionForVMTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug + pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + 
def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # test failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # test failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 1a2903b98..c7038deae 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,31 +20,11 @@ """ -def load_sdk(flags): - """ - This method is used for loading the XenAPI SDK (fake or real) - """ - xenapi_module = \ - flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - flags.xenapi_use_fake_session and ['fake'] or [] - - return __import__(xenapi_module, globals(), locals(), from_list, -1) - - class HelperBase(): """ - The class that wraps the helper methods together. + The base for helper classes. This adds the XenAPI class attribute """ XenAPI = None def __init__(self): return - - @classmethod - def late_import(cls, FLAGS): - """ - Load XenAPI module in for helper class - """ - if cls.XenAPI is None: - cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index f5fea3cc2..038064e8e 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -154,7 +154,7 @@ class Failure(Exception): def __str__(self): try: return str(self.details) - except Exception, exn: + except Exception, exc: return "XenAPI Fake Failure: %s" % str(self.details) def _details_map(self): @@ -324,8 +324,8 @@ class SessionBase(object): try: task['result'] = self.xenapi_request(func, params[1:]) task['status'] = 'success' - except Failure, exn: - task['error_info'] = exn.details + except Failure, exc: + task['error_info'] = exc.details task['status'] = 'failed' task['finished'] = datetime.datetime.now() return task_ref @@ -372,37 +372,3 @@ class _Dispatcher: def __call__(self, *args): return self.__send(self.__name, args) - - -class FakeSession(SessionBase): - def __init__(self, uri): - super(FakeSession, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = get_all('VDI') - for ref in refs: - rec = get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise Failure([['INVALID_VDI', 'session', self._session]]) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 
36b8fecc2..abaa1f5f1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -28,7 +28,6 @@ from nova import flags from nova import exception from nova.auth.manager import AuthManager -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -38,10 +37,9 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = session.get_imported_xenapi() self._session = session - # Load XenAPI module in the helper class - VMHelper.late_import(flags.FLAGS) + VMHelper.XenAPI = self.XenAPI def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 1b337a6ed..68806c4c2 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -22,7 +22,6 @@ import logging from twisted.internet import defer from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -33,11 +32,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = session.get_imported_xenapi() self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import(flags.FLAGS) - VMHelper.late_import(flags.FLAGS) + VolumeHelper.XenAPI = self.XenAPI + VMHelper.XenAPI = self.XenAPI @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 5f2b9c7c6..a7e3a6723 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -36,7 +36,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. **Related Flags** -:xenapi_use_fake_session: To be set for unit testing :xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. :xenapi_connection_username: Username for connection to XenServer/Xen Cloud Platform (default: root). @@ -59,15 +58,11 @@ from twisted.internet import reactor from nova import utils from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS -flags.DEFINE_boolean('xenapi_use_fake_session', - False, - 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' @@ -159,15 +154,14 @@ class XenAPIConnection(object): class XenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self, url, user, pw): - # This is loaded late so that there's no need to install this - # library when not using XenAPI. - self.XenAPI = load_sdk(FLAGS) - if FLAGS.xenapi_use_fake_session: - self._session = self.XenAPI.FakeSession(url) - else: - self._session = self.XenAPI.Session(url) + self.XenAPI = self.get_imported_xenapi() + self._session = self._create_session(url) self._session.login_with_password(user, pw) + def get_imported_xenapi(self): + """Stubout point. 
This can be replaced with a mock xenapi module.""" + return __import__('XenAPI') + def get_xenapi(self): """ Return the xenapi object """ return self._session.xenapi @@ -200,6 +194,10 @@ class XenAPISession(object): reactor.callLater(0, self._poll_task, task, d) return d + def _create_session(self, url): + """Stubout point. This can be replaced with a mock session.""" + return self.XenAPI.Session(url) + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we @@ -220,7 +218,7 @@ class XenAPISession(object): error_info) deferred.errback(self.XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) - except self.XenAPI.Failure, exc: + except Exception, exc: logging.warn(exc) deferred.errback(exc) -- cgit From e893be0a8d32cf1eb2c91187b81a6febf90e5b7c Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Wed, 15 Dec 2010 18:28:00 +0000 Subject: Adding back in openssh-lpk schema, as keys will likely be stored in LDAP again. --- nova/auth/opendj.sh | 1 + nova/auth/openssh-lpk_openldap.schema | 19 +++++++++++++++++++ nova/auth/openssh-lpk_sun.schema | 10 ++++++++++ nova/auth/slap.sh | 1 + 4 files changed, 31 insertions(+) create mode 100644 nova/auth/openssh-lpk_openldap.schema create mode 100644 nova/auth/openssh-lpk_sun.schema diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh index 9a9600342..1a280e5a8 100755 --- a/nova/auth/opendj.sh +++ b/nova/auth/opendj.sh @@ -30,6 +30,7 @@ fi abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` schemapath='/var/opendj/instance/config/schema' +cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif chown opendj:opendj $schemapath/98-nova_sun.ldif diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema new file mode 100644 index 000000000..93351da6d --- /dev/null +++ b/nova/auth/openssh-lpk_openldap.schema @@ -0,0 +1,19 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Based on the proposal of : Mark Ruijter +# + + +# octetString SYNTAX +attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' + DESC 'MANDATORY: OpenSSH Public key' + EQUALITY octetStringMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) + +# printableString SYNTAX yes|no +objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY + DESC 'MANDATORY: OpenSSH LPK objectclass' + MAY ( sshPublicKey $ uid ) + ) diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema new file mode 100644 index 000000000..5f52db3b6 --- /dev/null +++ b/nova/auth/openssh-lpk_sun.schema @@ -0,0 +1,10 @@ +# +# LDAP Public Key Patch schema for use with openssh-ldappubkey +# Author: Eric AUGE +# +# Schema for Sun Directory Server. +# Based on the original schema, modified by Stefan Fischer. 
+# +dn: cn=schema +attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) +objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) ) diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh index 36c4ba37b..95c61dafd 100755 --- a/nova/auth/slap.sh +++ b/nova/auth/slap.sh @@ -21,6 +21,7 @@ apt-get install -y slapd ldap-utils python-ldap abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"` +cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema cp $abspath/nova_openldap.schema /etc/ldap/schema/nova.schema mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig -- cgit From a4db44b94e611798d57ad59f4d4dbb5fb00516db Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 16:33:38 +0000 Subject: Removed FakeInstance and introduced stubout for DB. Code clean-up --- nova/tests/db/__init__.py | 33 +++++++++ nova/tests/db/fakes.py | 61 ++++++++++++++++ nova/tests/xenapi/__init__.py | 33 +++++++++ nova/tests/xenapi/stubs.py | 98 +++++++++++++++++++++++++ nova/tests/xenapi_unittest.py | 165 +++++++++++------------------------------- 5 files changed, 267 insertions(+), 123 deletions(-) create mode 100644 nova/tests/db/__init__.py create mode 100644 nova/tests/db/fakes.py create mode 100644 nova/tests/xenapi/__init__.py create mode 100644 nova/tests/xenapi/stubs.py diff --git a/nova/tests/db/__init__.py b/nova/tests/db/__init__.py new file mode 100644 index 000000000..a157d7592 --- /dev/null +++ b/nova/tests/db/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py new file mode 100644 index 000000000..b3fb56c69 --- /dev/null +++ b/nova/tests/db/fakes.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" + +import time + +from nova import db +from nova import utils +from nova.compute import instance_types + + +def stub_out_db_instance_api(stubs): + """ Stubs out the db API for creating Instances """ + + class FakeInstance(object): + """ Stubs out the Instance model """ + def __init__(self, values): + self.values = values + + def __getattr__(self, name): + return self.values[name] + + def fake_create(values): + """ Stubs out the db.instance_create method """ + + type_data = instance_types.INSTANCE_TYPES[values['instance_type']] + + base_options = { + 'name': values['name'], + 'reservation_id': utils.generate_uid('r'), + 'image_id': values['image_id'], + 'kernel_id': values['kernel_id'], + 'ramdisk_id': values['ramdisk_id'], + 'state_description': 'scheduling', + 'user_id': values['user_id'], + 'project_id': values['project_id'], + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': values['instance_type'], + 'memory_mb': type_data['memory_mb'], + 'mac_address': values['mac_address'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + } + return FakeInstance(base_options) + + stubs.Set(db, 'instance_create', fake_create) diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py new file mode 100644 index 000000000..a157d7592 --- /dev/null +++ b/nova/tests/xenapi/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py new file mode 100644 index 000000000..525189388 --- /dev/null +++ b/nova/tests/xenapi/stubs.py @@ -0,0 +1,98 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Stubouts, mocks and fixtures for the test suite""" + +from nova.virt import xenapi_conn +from nova.virt.xenapi import fake + + +def stubout_session(stubs, cls): + """ Stubs out two methods from XenAPISession """ + def fake_import(self): + """ Stubs out get_imported_xenapi of XenAPISession """ + fake_module = 'nova.virt.xenapi.fake' + from_list = ['fake'] + return __import__(fake_module, globals(), locals(), from_list, -1) + + stubs.Set(xenapi_conn.XenAPISession, '_create_session', + lambda s, url: cls(url)) + stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', + fake_import) + + +class FakeSessionForVMTests(fake.SessionBase): + """ Stubs out a XenAPISession for VM tests """ + def __init__(self, uri): + super(FakeSessionForVMTests, self).__init__(uri) + + def network_get_all_records_where(self, _1, _2): + return self.xenapi.network.get_all_records() + + def host_call_plugin(self, _1, _2, _3, _4, _5): + return '' + + def VM_start(self, _1, ref, _2, _3): + vm = fake.get_record('VM', ref) + if vm['power_state'] != 'Halted': + raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', + vm['power_state']]) + vm['power_state'] = 'Running' + + +class FakeSessionForVolumeTests(fake.SessionBase): + """ Stubs out a XenAPISession for Volume tests """ + def __init__(self, uri): + super(FakeSessionForVolumeTests, self).__init__(uri) + + def VBD_plug(self, _1, _2): + #FIXME(armando):make proper plug + pass + + def PBD_unplug(self, _1, _2): + #FIXME(armando):make proper unplug + pass + + def SR_forget(self, _1, _2): + #FIXME(armando):make proper forget + pass + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + #FIXME(armando):make proper introduce + valid_vdi = False + refs = fake.get_all('VDI') + for ref in refs: + rec = fake.get_record('VDI', ref) + if rec['uuid'] == uuid: + valid_vdi = True + if not valid_vdi: + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + +class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): + """ Stubs out a XenAPISession for Volume tests: it injects failures """ + def __init__(self, uri): + super(FakeSessionForVolumeFailedTests, self).__init__(uri) + + def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, + _6, _7, _8, _9, _10, _11): + # This is for testing failure + raise fake.Failure([['INVALID_VDI', 'session', self._session]]) + + def VBD_plug(self, _1, _2): + # This is for testing failure + raise fake.Failure([['INVALID_VBD', 'session', self._session]]) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index b9955a946..c2612a4c5 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -30,16 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. +""" +Test suite for XenAPI +""" import stubout -import uuid - -from twisted.internet import defer -from twisted.internet import threads from nova import db from nova import context -from nova import exception from nova import flags from nova import test from nova import utils @@ -49,28 +31,15 @@ from nova.compute import power_state from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import volumeops -from boto.ec2.volume import Volume +from nova.tests.db import fakes +from nova.tests.xenapi import stubs FLAGS = flags.FLAGS -def stubout_session(stubs, cls): - def fake_import(self): - fake_module = 'nova.virt.xenapi.fake' - from_list = ['fake'] - return __import__(fake_module, globals(), locals(), from_list, -1) - - stubs.Set(xenapi_conn.XenAPISession, '_create_session', - lambda s, url: cls(url)) - stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi', - fake_import) - - class XenAPIVolumeTestCase(test.TrialTestCase): """ - Unit tests for VM operations + Unit tests for Volume operations """ def setUp(self): super(XenAPIVolumeTestCase, self).setUp() @@ -78,7 +47,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): FLAGS.target_host = '127.0.0.1' FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' + fakes.stub_out_db_instance_api(self.stubs) fake.reset() + self.values = {'name': 1, + 'project_id': 'fake', + 'user_id': 'fake', + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } def _create_volume(self, size='0'): """Create a volume object.""" @@ -94,7 +73,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_create_iscsi_storage(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -106,11 +85,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info, label, description) + srs = fake.get_all('SR') + self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) def test_parse_volume_info_raise_exception(self): """ This shows how to test helper classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass') helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() @@ -119,6 +100,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): info = helper.parse_volume_info(vol['ec2_id'], 
'/dev/sd') def check(exc): + """ handler """ self.assertIsInstance(exc.value, volume_utils.StorageError) info.addErrback(check) @@ -126,16 +108,16 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume(self): """ This shows how to test Ops classes' methods """ - stubout_session(self.stubs, FakeSessionForVolumeTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(_): + """ handler """ # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -150,16 +132,17 @@ class XenAPIVolumeTestCase(test.TrialTestCase): def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ - stubout_session(self.stubs, FakeSessionForVolumeFailedTests) + stubs.stubout_session(self.stubs, + stubs.FakeSessionForVolumeFailedTests) conn = xenapi_conn.get_connection(False) volume = self._create_volume() - instance = FakeInstance(1, 'fake', 'fake', 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') def check(exc): + """ handler """ if exc: pass else: @@ -188,22 +171,32 @@ class XenAPIVMTestCase(test.TrialTestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' fake.reset() + fakes.stub_out_db_instance_api(self.stubs) fake.create_network('fake', FLAGS.flat_network_bridge) def test_list_instances_0(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) conn = xenapi_conn.get_connection(False) instances = conn.list_instances() self.assertEquals(instances, []) def test_spawn(self): - stubout_session(self.stubs, FakeSessionForVMTests) + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + values = {'name': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } conn = xenapi_conn.get_connection(False) - instance = FakeInstance(1, self.project.id, self.user.id, 1, 2, 3, - 'm1.large', 'aa:bb:cc:dd:ee:ff') + instance = db.instance_create(values) result = conn.spawn(instance) def check(_): + """ handler """ instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -241,77 +234,3 @@ class XenAPIVMTestCase(test.TrialTestCase): self.manager.delete_project(self.project) self.manager.delete_user(self.user) self.stubs.UnsetAll() - - -class FakeInstance(): - def __init__(self, name, project_id, user_id, image_id, kernel_id, - ramdisk_id, instance_type, mac_address): - self.name = name - self.project_id = project_id - self.user_id = user_id - self.image_id = image_id - self.kernel_id = kernel_id - self.ramdisk_id = ramdisk_id - self.instance_type = instance_type - self.mac_address = mac_address - - -class FakeSessionForVMTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVMTests, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, 
_5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = fake.get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - -class FakeSessionForVolumeTests(fake.SessionBase): - def __init__(self, uri): - super(FakeSessionForVolumeTests, self).__init__(uri) - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = fake.get_all('VDI') - for ref in refs: - rec = fake.get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - -class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): - def __init__(self, uri): - super(FakeSessionForVolumeFailedTests, self).__init__(uri) - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - # test failure - raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - - def VBD_plug(self, _1, _2): - # test failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) -- cgit From 8152acf7c3df83a04591fdafb21201965da7bfad Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 17:47:48 +0000 Subject: fake session clean-up --- nova/tests/db/__init__.py | 16 ---------------- nova/tests/xenapi/__init__.py | 16 ---------------- nova/virt/xenapi/fake.py | 34 ++++++++++++++++++++++------------ 3 files changed, 22 insertions(+), 44 deletions(-) diff --git a/nova/tests/db/__init__.py b/nova/tests/db/__init__.py index a157d7592..dcd81b743 100644 --- a/nova/tests/db/__init__.py +++ b/nova/tests/db/__init__.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/tests/xenapi/__init__.py b/nova/tests/xenapi/__init__.py index a157d7592..dcd81b743 100644 --- a/nova/tests/xenapi/__init__.py +++ b/nova/tests/xenapi/__init__.py @@ -1,21 +1,5 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 038064e8e..a46f3fd80 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -105,9 +105,10 @@ def create_vdi(name_label, read_only, sr_ref, sharable): }) -def create_pbd(config, attached): +def create_pbd(config, sr_ref, attached): return _create_object('PBD', { 'device-config': config, + 'SR': sr_ref, 'currently-attached': attached, }) @@ -126,6 +127,21 @@ def _create_object(table, obj): return ref +def _create_sr(table, obj): + sr_type = obj[6] + # Forces fake to support iscsi only + if sr_type != 'iscsi': + raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) + sr_ref = _create_object(table, obj[2]) + vdi_ref = create_vdi('', False, sr_ref, False) + pbd_ref = create_pbd('', sr_ref, True) + _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + _db_content['VDI'][vdi_ref]['SR'] = sr_ref + _db_content['PBD'][pbd_ref]['SR'] = sr_ref + return sr_ref + + def get_all(table): return _db_content[table].keys() @@ -296,19 +312,13 @@ class SessionBase(object): def _create(self, name, params): self._check_session(params) - expected = 2 - if name == 'SR.create': - expected = 10 + is_sr_create = name == 'SR.create' + # Storage Repositories have a different API + expected = is_sr_create and 10 or 2 self._check_arg_count(params, expected) (cls, _) = name.split('.') - if name == 'SR.create': - vdi_ref = create_vdi('', False, '', False) - pbd_ref = create_pbd('', True) - params[2]['VDIs'] = [vdi_ref] - params[2]['PBDs'] = [pbd_ref] - ref = _create_object(cls, params[2]) - else: - ref = _create_object(cls, params[1]) + ref = is_sr_create and \ + _create_sr(cls, params) or _create_object(cls, params[1]) obj = get_record(cls, ref) # Add RO fields -- cgit From db96fd559d28bcfdf8cc29d79b9afca6dea1cfb7 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 18:44:42 +0000 Subject: reviewed the FIXMEs, and spotted an uncaught exception in volume_utils...yay! 
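
The uncaught exception was around the VDI.introduce call in
nova/virt/xenapi/volume_utils.py: a XenAPI failure raised there would
propagate raw instead of being logged and translated to StorageError the
way the VDI lookup just above it already is. A condensed sketch of the
pattern now applied (mirroring the hunk below; session, vdi_rec, sr_ref
and StorageError are the names already used in that module):

    try:
        vdi_ref = session.get_xenapi().VDI.introduce(
            vdi_rec['uuid'],
            vdi_rec['name_label'],
            vdi_rec['name_description'],
            vdi_rec['SR'],
            vdi_rec['type'],
            vdi_rec['sharable'],
            vdi_rec['read_only'],
            vdi_rec['other_config'],
            vdi_rec['location'],
            vdi_rec['xenstore_data'],
            vdi_rec['sm_config'])
    except cls.XenAPI.Failure, exc:
        # Log the raw XenAPI failure, then surface the single storage
        # exception type that callers of the volume helpers expect.
        logging.warn(exc)
        raise StorageError('Unable to introduce VDI for SR %s' % sr_ref)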
--- nova/tests/virt_unittest.py | 2 -- nova/tests/xenapi/stubs.py | 24 +++++++++--------------- nova/tests/xenapi_unittest.py | 5 ++--- nova/virt/xenapi/fake.py | 1 + nova/virt/xenapi/volume_utils.py | 29 +++++++++++++++++------------ 5 files changed, 29 insertions(+), 32 deletions(-) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 52843b703..d49383fb7 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -25,8 +25,6 @@ from nova import utils from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi import volume_utils FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 525189388..11dd535d4 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -58,21 +58,12 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def PBD_unplug(self, _1, _2): - #FIXME(armando):make proper unplug - pass - - def SR_forget(self, _1, _2): - #FIXME(armando):make proper forget - pass + def VBD_plug(self, _1, ref): + rec = fake.get_record('VBD', ref) + rec['currently-attached'] = True def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce valid_vdi = False refs = fake.get_all('VDI') for ref in refs: @@ -93,6 +84,9 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): # This is for testing failure raise fake.Failure([['INVALID_VDI', 'session', self._session]]) - def VBD_plug(self, _1, _2): - # This is for testing failure - raise fake.Failure([['INVALID_VBD', 'session', self._session]]) + def PBD_unplug(self, _1, ref): + rec = fake.get_record('PBD', ref) + rec['currently-attached'] = False + + def SR_forget(self, _1, ref): + pass diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index c2612a4c5..839d6aa44 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -141,14 +141,13 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(exc): + def check_exception(exc): """ handler """ if exc: pass else: self.fail('Oops, no exception has been raised!') - - result.addErrback(check) + result.addErrback(check_exception) return result def tearDown(self): diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index a46f3fd80..7877b5905 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -102,6 +102,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable): 'location': '', 'xenstore_data': '', 'sm_config': {}, + 'VBDs': {}, }) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 4482e465c..8d1d6fb81 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -152,18 +152,23 @@ class VolumeHelper(HelperBase): logging.warn(exc) raise StorageError('Unable to get record of VDI %s on' % vdis[0]) else: - return session.get_xenapi().VDI.introduce( - vdi_rec['uuid'], - vdi_rec['name_label'], - vdi_rec['name_description'], - vdi_rec['SR'], - vdi_rec['type'], - vdi_rec['sharable'], - vdi_rec['read_only'], - vdi_rec['other_config'], - vdi_rec['location'], - vdi_rec['xenstore_data'], - vdi_rec['sm_config']) + try: + vdi_ref = 
session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + except cls.XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to introduce VDI for SR %s' + % sr_ref) @classmethod @defer.inlineCallbacks -- cgit From 8c343e1b4b92aa7b1062acebe8eaea402bc6ab4a Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 16 Dec 2010 17:05:54 -0800 Subject: First pass at converting run_tests.py to nosetests. The network and objctstore tests don't yet work. Also, we need to manually remove the sqlite file between runs. --- nova/test.py | 3 + nova/tests/access_unittest.py | 127 -------------- nova/tests/api/__init__.py | 81 --------- nova/tests/api/test.py | 81 +++++++++ nova/tests/api_integration.py | 54 ------ nova/tests/api_unittest.py | 338 ------------------------------------- nova/tests/auth_unittest.py | 352 --------------------------------------- nova/tests/cloud_unittest.py | 332 ------------------------------------ nova/tests/compute_unittest.py | 155 ----------------- nova/tests/flags_unittest.py | 102 ------------ nova/tests/misc_unittest.py | 55 ------ nova/tests/quota_unittest.py | 153 ----------------- nova/tests/rpc_unittest.py | 103 ------------ nova/tests/scheduler_unittest.py | 246 --------------------------- nova/tests/service_unittest.py | 227 ------------------------- nova/tests/test_access.py | 127 ++++++++++++++ nova/tests/test_api.py | 338 +++++++++++++++++++++++++++++++++++++ nova/tests/test_auth.py | 352 +++++++++++++++++++++++++++++++++++++++ nova/tests/test_cloud.py | 332 ++++++++++++++++++++++++++++++++++++ nova/tests/test_compute.py | 155 +++++++++++++++++ nova/tests/test_flags.py | 102 ++++++++++++ nova/tests/test_misc.py | 55 ++++++ nova/tests/test_quota.py | 153 +++++++++++++++++ nova/tests/test_rpc.py | 103 ++++++++++++ nova/tests/test_scheduler.py | 246 +++++++++++++++++++++++++++ nova/tests/test_service.py | 227 +++++++++++++++++++++++++ nova/tests/test_twistd.py | 53 ++++++ nova/tests/test_virt.py | 258 ++++++++++++++++++++++++++++ nova/tests/test_volume.py | 175 +++++++++++++++++++ nova/tests/twistd_unittest.py | 53 ------ nova/tests/virt_unittest.py | 258 ---------------------------- nova/tests/volume_unittest.py | 175 ------------------- run_tests.py | 125 -------------- 33 files changed, 2760 insertions(+), 2936 deletions(-) delete mode 100644 nova/tests/access_unittest.py create mode 100644 nova/tests/api/test.py delete mode 100644 nova/tests/api_integration.py delete mode 100644 nova/tests/api_unittest.py delete mode 100644 nova/tests/auth_unittest.py delete mode 100644 nova/tests/cloud_unittest.py delete mode 100644 nova/tests/compute_unittest.py delete mode 100644 nova/tests/flags_unittest.py delete mode 100644 nova/tests/misc_unittest.py delete mode 100644 nova/tests/quota_unittest.py delete mode 100644 nova/tests/rpc_unittest.py delete mode 100644 nova/tests/scheduler_unittest.py delete mode 100644 nova/tests/service_unittest.py create mode 100644 nova/tests/test_access.py create mode 100644 nova/tests/test_api.py create mode 100644 nova/tests/test_auth.py create mode 100644 nova/tests/test_cloud.py create mode 100644 nova/tests/test_compute.py create mode 100644 nova/tests/test_flags.py create mode 100644 nova/tests/test_misc.py create mode 100644 nova/tests/test_quota.py create mode 100644 
nova/tests/test_rpc.py create mode 100644 nova/tests/test_scheduler.py create mode 100644 nova/tests/test_service.py create mode 100644 nova/tests/test_twistd.py create mode 100644 nova/tests/test_virt.py create mode 100644 nova/tests/test_volume.py delete mode 100644 nova/tests/twistd_unittest.py delete mode 100644 nova/tests/virt_unittest.py delete mode 100644 nova/tests/volume_unittest.py delete mode 100644 run_tests.py diff --git a/nova/test.py b/nova/test.py index 7076f1bf4..db5826c04 100644 --- a/nova/test.py +++ b/nova/test.py @@ -38,9 +38,12 @@ from nova import fakerabbit from nova import flags from nova import rpc from nova.network import manager as network_manager +from nova.tests import fake_flags FLAGS = flags.FLAGS +flags.DEFINE_bool('flush_db', True, + 'Flush the database before running fake tests') flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py deleted file mode 100644 index 58fdea3b5..000000000 --- a/nova/tests/access_unittest.py +++ /dev/null @@ -1,127 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import unittest -import logging -import webob - -from nova import context -from nova import exception -from nova import flags -from nova import test -from nova.api import ec2 -from nova.auth import manager - - -FLAGS = flags.FLAGS - - -class Context(object): - pass - - -class AccessTestCase(test.TestCase): - def setUp(self): - super(AccessTestCase, self).setUp() - um = manager.AuthManager() - self.context = context.get_admin_context() - # Make test users - self.testadmin = um.create_user('testadmin') - self.testpmsys = um.create_user('testpmsys') - self.testnet = um.create_user('testnet') - self.testsys = um.create_user('testsys') - # Assign some rules - um.add_role('testadmin', 'cloudadmin') - um.add_role('testpmsys', 'sysadmin') - um.add_role('testnet', 'netadmin') - um.add_role('testsys', 'sysadmin') - - # Make a test project - self.project = um.create_project('testproj', - 'testpmsys', - 'a test project', - ['testpmsys', 'testnet', 'testsys']) - self.project.add_role(self.testnet, 'netadmin') - self.project.add_role(self.testsys, 'sysadmin') - #user is set in each test - - def noopWSGIApp(environ, start_response): - start_response('200 OK', []) - return [''] - - self.mw = ec2.Authorizer(noopWSGIApp) - self.mw.action_roles = {'str': { - '_allow_all': ['all'], - '_allow_none': [], - '_allow_project_manager': ['projectmanager'], - '_allow_sys_and_net': ['sysadmin', 'netadmin'], - '_allow_sysadmin': ['sysadmin']}} - - def tearDown(self): - um = manager.AuthManager() - # Delete the test project - um.delete_project('testproj') - # Delete the test user - um.delete_user('testadmin') - um.delete_user('testpmsys') - um.delete_user('testnet') - um.delete_user('testsys') - super(AccessTestCase, self).tearDown() - - def response_status(self, user, methodName): - ctxt = context.RequestContext(user, self.project) - environ = {'ec2.context': ctxt, - 'ec2.controller': 'some string', - 'ec2.action': methodName} - req = webob.Request.blank('/', environ) - resp = req.get_response(self.mw) - return resp.status_int - - def shouldAllow(self, user, methodName): - self.assertEqual(200, self.response_status(user, methodName)) - - def shouldDeny(self, user, methodName): - self.assertEqual(401, self.response_status(user, methodName)) - - def test_001_allow_all(self): - users = [self.testadmin, self.testpmsys, self.testnet, self.testsys] - for user in users: - self.shouldAllow(user, '_allow_all') - - def test_002_allow_none(self): - self.shouldAllow(self.testadmin, '_allow_none') - users = [self.testpmsys, self.testnet, self.testsys] - for user in users: - self.shouldDeny(user, '_allow_none') - - def test_003_allow_project_manager(self): - for user in [self.testadmin, self.testpmsys]: - self.shouldAllow(user, '_allow_project_manager') - for user in [self.testnet, self.testsys]: - self.shouldDeny(user, '_allow_project_manager') - - def test_004_allow_sys_and_net(self): - for user in [self.testadmin, self.testnet, self.testsys]: - self.shouldAllow(user, '_allow_sys_and_net') - # denied because it doesn't have the per project sysadmin - for user in [self.testpmsys]: - self.shouldDeny(user, '_allow_sys_and_net') - -if __name__ == "__main__": - # TODO: Implement use_fake as an option - unittest.main() diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py index 9caa8c9d0..e69de29bb 100644 --- a/nova/tests/api/__init__.py +++ b/nova/tests/api/__init__.py @@ -1,81 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test for the root WSGI middleware for all API controllers. -""" - -import unittest - -import stubout -import webob -import webob.dec - -import nova.exception -from nova import api -from nova.tests.api.fakes import APIStub - - -class Test(unittest.TestCase): - - def setUp(self): - self.stubs = stubout.StubOutForTesting() - - def tearDown(self): - self.stubs.UnsetAll() - - def _request(self, url, subdomain, **kwargs): - environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain} - environ_keys.update(kwargs) - req = webob.Request.blank(url, environ_keys) - return req.get_response(api.API('ec2')) - - def test_openstack(self): - self.stubs.Set(api.openstack, 'API', APIStub) - result = self._request('/v1.0/cloud', 'api') - self.assertEqual(result.body, "/cloud") - - def test_ec2(self): - self.stubs.Set(api.ec2, 'API', APIStub) - result = self._request('/services/cloud', 'ec2') - self.assertEqual(result.body, "/cloud") - - def test_not_found(self): - self.stubs.Set(api.ec2, 'API', APIStub) - self.stubs.Set(api.openstack, 'API', APIStub) - result = self._request('/test/cloud', 'ec2') - self.assertNotEqual(result.body, "/cloud") - - def test_query_api_versions(self): - result = self._request('/', 'api') - self.assertTrue('CURRENT' in result.body) - - def test_metadata(self): - def go(url): - result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2') - # Each should get to the ORM layer and fail to find the IP - self.assertRaises(nova.exception.NotFound, go, '/latest/') - self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/') - self.assertRaises(nova.exception.NotFound, go, '/1.0/') - - def test_ec2_root(self): - result = self._request('/', 'ec2') - self.assertTrue('2007-12-15\n' in result.body) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/api/test.py b/nova/tests/api/test.py new file mode 100644 index 000000000..9caa8c9d0 --- /dev/null +++ b/nova/tests/api/test.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test for the root WSGI middleware for all API controllers. 
+""" + +import unittest + +import stubout +import webob +import webob.dec + +import nova.exception +from nova import api +from nova.tests.api.fakes import APIStub + + +class Test(unittest.TestCase): + + def setUp(self): + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): + self.stubs.UnsetAll() + + def _request(self, url, subdomain, **kwargs): + environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain} + environ_keys.update(kwargs) + req = webob.Request.blank(url, environ_keys) + return req.get_response(api.API('ec2')) + + def test_openstack(self): + self.stubs.Set(api.openstack, 'API', APIStub) + result = self._request('/v1.0/cloud', 'api') + self.assertEqual(result.body, "/cloud") + + def test_ec2(self): + self.stubs.Set(api.ec2, 'API', APIStub) + result = self._request('/services/cloud', 'ec2') + self.assertEqual(result.body, "/cloud") + + def test_not_found(self): + self.stubs.Set(api.ec2, 'API', APIStub) + self.stubs.Set(api.openstack, 'API', APIStub) + result = self._request('/test/cloud', 'ec2') + self.assertNotEqual(result.body, "/cloud") + + def test_query_api_versions(self): + result = self._request('/', 'api') + self.assertTrue('CURRENT' in result.body) + + def test_metadata(self): + def go(url): + result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2') + # Each should get to the ORM layer and fail to find the IP + self.assertRaises(nova.exception.NotFound, go, '/latest/') + self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/') + self.assertRaises(nova.exception.NotFound, go, '/1.0/') + + def test_ec2_root(self): + result = self._request('/', 'ec2') + self.assertTrue('2007-12-15\n' in result.body) + + +if __name__ == '__main__': + unittest.main() diff --git a/nova/tests/api_integration.py b/nova/tests/api_integration.py deleted file mode 100644 index 54403c655..000000000 --- a/nova/tests/api_integration.py +++ /dev/null @@ -1,54 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import boto -from boto.ec2.regioninfo import RegionInfo -import unittest - - -ACCESS_KEY = 'fake' -SECRET_KEY = 'fake' -CLC_IP = '127.0.0.1' -CLC_PORT = 8773 -REGION = 'test' - - -def get_connection(): - return boto.connect_ec2( - aws_access_key_id=ACCESS_KEY, - aws_secret_access_key=SECRET_KEY, - is_secure=False, - region=RegionInfo(None, REGION, CLC_IP), - port=CLC_PORT, - path='/services/Cloud', - debug=99) - - -class APIIntegrationTests(unittest.TestCase): - def test_001_get_all_images(self): - conn = get_connection() - res = conn.get_all_images() - - -if __name__ == '__main__': - unittest.main() - -#print conn.get_all_key_pairs() -#print conn.create_key_pair -#print conn.create_security_group('name', 'description') diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py deleted file mode 100644 index 33d4cb294..000000000 --- a/nova/tests/api_unittest.py +++ /dev/null @@ -1,338 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for the API endpoint""" - -import boto -from boto.ec2 import regioninfo -import httplib -import random -import StringIO -import webob - -from nova import context -from nova import flags -from nova import test -from nova import api -from nova.api.ec2 import cloud -from nova.api.ec2 import apirequest -from nova.auth import manager - - -class FakeHttplibSocket(object): - """a fake socket implementation for httplib.HTTPResponse, trivial""" - def __init__(self, response_string): - self._buffer = StringIO.StringIO(response_string) - - def makefile(self, _mode, _other): - """Returns the socket's internal buffer""" - return self._buffer - - -class FakeHttplibConnection(object): - """A fake httplib.HTTPConnection for boto to use - - requests made via this connection actually get translated and routed into - our WSGI app, we then wait for the response and turn it back into - the httplib.HTTPResponse that boto expects. - """ - def __init__(self, app, host, is_secure=False): - self.app = app - self.host = host - - def request(self, method, path, data, headers): - req = webob.Request.blank(path) - req.method = method - req.body = data - req.headers = headers - req.headers['Accept'] = 'text/html' - req.host = self.host - # Call the WSGI app, get the HTTP response - resp = str(req.get_response(self.app)) - # For some reason, the response doesn't have "HTTP/1.0 " prepended; I - # guess that's a function the web server usually provides. 
- resp = "HTTP/1.0 %s" % resp - sock = FakeHttplibSocket(resp) - self.http_response = httplib.HTTPResponse(sock) - self.http_response.begin() - - def getresponse(self): - return self.http_response - - def close(self): - """Required for compatibility with boto/tornado""" - pass - - -class XmlConversionTestCase(test.TrialTestCase): - """Unit test api xml conversion""" - def test_number_conversion(self): - conv = apirequest._try_convert - self.assertEqual(conv('None'), None) - self.assertEqual(conv('True'), True) - self.assertEqual(conv('False'), False) - self.assertEqual(conv('0'), 0) - self.assertEqual(conv('42'), 42) - self.assertEqual(conv('3.14'), 3.14) - self.assertEqual(conv('-57.12'), -57.12) - self.assertEqual(conv('0x57'), 0x57) - self.assertEqual(conv('-0x57'), -0x57) - self.assertEqual(conv('-'), '-') - self.assertEqual(conv('-0'), 0) - - -class ApiEc2TestCase(test.TrialTestCase): - """Unit test for the cloud controller on an EC2 API""" - def setUp(self): - super(ApiEc2TestCase, self).setUp() - - self.manager = manager.AuthManager() - - self.host = '127.0.0.1' - - self.app = api.API('ec2') - - def expect_http(self, host=None, is_secure=False): - """Returns a new EC2 connection""" - self.ec2 = boto.connect_ec2( - aws_access_key_id='fake', - aws_secret_access_key='fake', - is_secure=False, - region=regioninfo.RegionInfo(None, 'test', self.host), - port=8773, - path='/services/Cloud') - - self.mox.StubOutWithMock(self.ec2, 'new_http_connection') - http = FakeHttplibConnection( - self.app, '%s:8773' % (self.host), False) - # pylint: disable-msg=E1103 - self.ec2.new_http_connection(host, is_secure).AndReturn(http) - return http - - def test_describe_instances(self): - """Test that, after creating a user and a project, the describe - instances call to the API works properly""" - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - self.assertEqual(self.ec2.get_all_instances(), []) - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_get_all_key_pairs(self): - """Test that, after creating a user and project and generating - a key pair, that the API call to list key pairs works properly""" - self.expect_http() - self.mox.ReplayAll() - keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ - for x in range(random.randint(4, 8))) - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - # NOTE(vish): create depends on pool, so call helper directly - cloud._gen_key(context.get_admin_context(), user.id, keyname) - - rv = self.ec2.get_all_key_pairs() - results = [k for k in rv if k.name == keyname] - self.assertEquals(len(results), 1) - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_get_all_security_groups(self): - """Test that we can retrieve security groups""" - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project = self.manager.create_project('fake', 'fake', 'fake') - - rv = self.ec2.get_all_security_groups() - - self.assertEquals(len(rv), 1) - self.assertEquals(rv[0].name, 'default') - - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_create_delete_security_group(self): - """Test that we can create a security group""" - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project 
= self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') - - security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - - self.ec2.create_security_group(security_group_name, 'test group') - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - self.assertEquals(len(rv), 2) - self.assertTrue(security_group_name in [group.name for group in rv]) - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.manager.delete_project(project) - self.manager.delete_user(user) - - def test_authorize_revoke_security_group_cidr(self): - """ - Test that we can add and remove CIDR based rules - to a security group - """ - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') - - security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") - for x in range(random.randint(4, 8))) - - group = self.ec2.create_security_group(security_group_name, - 'test group') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.authorize('tcp', 80, 81, '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that. - for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(int(group.rules[0].from_port), 80) - self.assertEquals(int(group.rules[0].to_port), 81) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.revoke('tcp', 80, 81, '0.0.0.0/0') - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - rv = self.ec2.get_all_security_groups() - - self.assertEqual(len(rv), 1) - self.assertEqual(rv[0].name, 'default') - - self.manager.delete_project(project) - self.manager.delete_user(user) - - return - - def test_authorize_revoke_security_group_foreign_group(self): - """ - Test that we can grant and revoke another security group access - to a security group - """ - self.expect_http() - self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') - - rand_string = 'sdiuisudfsdcnpaqwertasd' - security_group_name = "".join(random.choice(rand_string) - for x in range(random.randint(4, 8))) - other_security_group_name = "".join(random.choice(rand_string) - for x in range(random.randint(4, 8))) - - group = self.ec2.create_security_group(security_group_name, - 'test group') - - self.expect_http() - self.mox.ReplayAll() - - other_group = self.ec2.create_security_group(other_security_group_name, - 'some 
other group') - - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - - group.authorize(src_group=other_group) - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - # I don't bother checkng that we actually find it here, - # because the create/delete unit test further up should - # be good enough for that. - for group in rv: - if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) - self.assertEquals(len(group.rules[0].grants), 1) - self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' % - (other_security_group_name, 'fake')) - - self.expect_http() - self.mox.ReplayAll() - - rv = self.ec2.get_all_security_groups() - - for group in rv: - if group.name == security_group_name: - self.expect_http() - self.mox.ReplayAll() - group.connection = self.ec2 - group.revoke(src_group=other_group) - - self.expect_http() - self.mox.ReplayAll() - - self.ec2.delete_security_group(security_group_name) - - self.manager.delete_project(project) - self.manager.delete_user(user) - - return diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py deleted file mode 100644 index 4508d6721..000000000 --- a/nova/tests/auth_unittest.py +++ /dev/null @@ -1,352 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -from M2Crypto import X509 -import unittest - -from nova import crypto -from nova import flags -from nova import test -from nova.auth import manager -from nova.api.ec2 import cloud - -FLAGS = flags.FLAGS - - -class user_generator(object): - def __init__(self, manager, **user_state): - if 'name' not in user_state: - user_state['name'] = 'test1' - self.manager = manager - self.user = manager.create_user(**user_state) - - def __enter__(self): - return self.user - - def __exit__(self, value, type, trace): - self.manager.delete_user(self.user) - - -class project_generator(object): - def __init__(self, manager, **project_state): - if 'name' not in project_state: - project_state['name'] = 'testproj' - if 'manager_user' not in project_state: - project_state['manager_user'] = 'test1' - self.manager = manager - self.project = manager.create_project(**project_state) - - def __enter__(self): - return self.project - - def __exit__(self, value, type, trace): - self.manager.delete_project(self.project) - - -class user_and_project_generator(object): - def __init__(self, manager, user_state={}, project_state={}): - self.manager = manager - if 'name' not in user_state: - user_state['name'] = 'test1' - if 'name' not in project_state: - project_state['name'] = 'testproj' - if 'manager_user' not in project_state: - project_state['manager_user'] = 'test1' - self.user = manager.create_user(**user_state) - self.project = manager.create_project(**project_state) - - def __enter__(self): - return (self.user, self.project) - - def __exit__(self, value, type, trace): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - - -class AuthManagerTestCase(object): - def setUp(self): - FLAGS.auth_driver = self.auth_driver - super(AuthManagerTestCase, self).setUp() - self.flags(connection_type='fake') - self.manager = manager.AuthManager(new=True) - - def test_create_and_find_user(self): - with user_generator(self.manager): - self.assert_(self.manager.get_user('test1')) - - def test_create_and_find_with_properties(self): - with user_generator(self.manager, name="herbert", secret="classified", - access="private-party"): - u = self.manager.get_user('herbert') - self.assertEqual('herbert', u.id) - self.assertEqual('herbert', u.name) - self.assertEqual('classified', u.secret) - self.assertEqual('private-party', u.access) - - def test_004_signature_is_valid(self): - #self.assertTrue(self.manager.authenticate(**boto.generate_url ...? 
)) - pass - #raise NotImplementedError - - def test_005_can_get_credentials(self): - return - credentials = self.manager.get_user('test1').get_credentials() - self.assertEqual(credentials, - 'export EC2_ACCESS_KEY="access"\n' + - 'export EC2_SECRET_KEY="secret"\n' + - 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + - 'export S3_URL="http://127.0.0.1:3333/"\n' + - 'export EC2_USER_ID="test1"\n') - - def test_can_list_users(self): - with user_generator(self.manager): - with user_generator(self.manager, name="test2"): - users = self.manager.get_users() - self.assert_(filter(lambda u: u.id == 'test1', users)) - self.assert_(filter(lambda u: u.id == 'test2', users)) - self.assert_(not filter(lambda u: u.id == 'test3', users)) - - def test_can_add_and_remove_user_role(self): - with user_generator(self.manager): - self.assertFalse(self.manager.has_role('test1', 'itsec')) - self.manager.add_role('test1', 'itsec') - self.assertTrue(self.manager.has_role('test1', 'itsec')) - self.manager.remove_role('test1', 'itsec') - self.assertFalse(self.manager.has_role('test1', 'itsec')) - - def test_can_create_and_get_project(self): - with user_and_project_generator(self.manager) as (u, p): - self.assert_(self.manager.get_user('test1')) - self.assert_(self.manager.get_user('test1')) - self.assert_(self.manager.get_project('testproj')) - - def test_can_list_projects(self): - with user_and_project_generator(self.manager): - with project_generator(self.manager, name="testproj2"): - projects = self.manager.get_projects() - self.assert_(filter(lambda p: p.name == 'testproj', projects)) - self.assert_(filter(lambda p: p.name == 'testproj2', projects)) - self.assert_(not filter(lambda p: p.name == 'testproj3', - projects)) - - def test_can_create_and_get_project_with_attributes(self): - with user_generator(self.manager): - with project_generator(self.manager, description='A test project'): - project = self.manager.get_project('testproj') - self.assertEqual('A test project', project.description) - - def test_can_create_project_with_manager(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertEqual('test1', project.project_manager_id) - self.assertTrue(self.manager.is_project_manager(user, project)) - - def test_create_project_assigns_manager_to_members(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertTrue(self.manager.is_project_member(user, project)) - - def test_no_extra_project_members(self): - with user_generator(self.manager, name='test2') as baduser: - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.is_project_member(baduser, - project)) - - def test_no_extra_project_managers(self): - with user_generator(self.manager, name='test2') as baduser: - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.is_project_manager(baduser, - project)) - - def test_can_add_user_to_project(self): - with user_generator(self.manager, name='test2') as user: - with user_and_project_generator(self.manager) as (_user, project): - self.manager.add_to_project(user, project) - project = self.manager.get_project('testproj') - self.assertTrue(self.manager.is_project_member(user, project)) - - def test_can_remove_user_from_project(self): - with user_generator(self.manager, name='test2') as user: - with user_and_project_generator(self.manager) as (_user, project): - self.manager.add_to_project(user, project) - project = self.manager.get_project('testproj') - 
self.assertTrue(self.manager.is_project_member(user, project)) - self.manager.remove_from_project(user, project) - project = self.manager.get_project('testproj') - self.assertFalse(self.manager.is_project_member(user, project)) - - def test_can_add_remove_user_with_role(self): - with user_generator(self.manager, name='test2') as user: - with user_and_project_generator(self.manager) as (_user, project): - # NOTE(todd): after modifying users you must reload project - self.manager.add_to_project(user, project) - project = self.manager.get_project('testproj') - self.manager.add_role(user, 'developer', project) - self.assertTrue(self.manager.is_project_member(user, project)) - self.manager.remove_from_project(user, project) - project = self.manager.get_project('testproj') - self.assertFalse(self.manager.has_role(user, 'developer', - project)) - self.assertFalse(self.manager.is_project_member(user, project)) - - def test_can_generate_x509(self): - # NOTE(todd): this doesn't assert against the auth manager - # so it probably belongs in crypto_unittest - # but I'm leaving it where I found it. - with user_and_project_generator(self.manager) as (user, project): - # NOTE(todd): Should mention why we must setup controller first - # (somebody please clue me in) - cloud_controller = cloud.CloudController() - cloud_controller.setup() - _key, cert_str = self.manager._generate_x509_cert('test1', - 'testproj') - logging.debug(cert_str) - - # Need to verify that it's signed by the right intermediate CA - full_chain = crypto.fetch_ca(project_id='testproj', chain=True) - int_cert = crypto.fetch_ca(project_id='testproj', chain=False) - cloud_cert = crypto.fetch_ca() - logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) - signed_cert = X509.load_cert_string(cert_str) - chain_cert = X509.load_cert_string(full_chain) - int_cert = X509.load_cert_string(int_cert) - cloud_cert = X509.load_cert_string(cloud_cert) - self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) - self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) - if not FLAGS.use_intermediate_ca: - self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) - else: - self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) - - def test_adding_role_to_project_is_ignored_unless_added_to_user(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.manager.add_role(user, 'sysadmin', project) - # NOTE(todd): it will still show up in get_user_roles(u, project) - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.manager.add_role(user, 'sysadmin') - self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) - - def test_add_user_role_doesnt_infect_project_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.manager.add_role(user, 'sysadmin') - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - - def test_can_list_user_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - roles = self.manager.get_user_roles(user) - self.assertTrue('sysadmin' in roles) - self.assertFalse('netadmin' in roles) - - def test_can_list_project_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.manager.add_role(user, 'sysadmin', project) - self.manager.add_role(user, 
'netadmin', project) - project_roles = self.manager.get_user_roles(user, project) - self.assertTrue('sysadmin' in project_roles) - self.assertTrue('netadmin' in project_roles) - # has role should be false user-level role is missing - self.assertFalse(self.manager.has_role(user, 'netadmin', project)) - - def test_can_remove_user_roles(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.assertTrue(self.manager.has_role(user, 'sysadmin')) - self.manager.remove_role(user, 'sysadmin') - self.assertFalse(self.manager.has_role(user, 'sysadmin')) - - def test_removing_user_role_hides_it_from_project(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.manager.add_role(user, 'sysadmin', project) - self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) - self.manager.remove_role(user, 'sysadmin') - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - - def test_can_remove_project_role_but_keep_user_role(self): - with user_and_project_generator(self.manager) as (user, project): - self.manager.add_role(user, 'sysadmin') - self.manager.add_role(user, 'sysadmin', project) - self.assertTrue(self.manager.has_role(user, 'sysadmin')) - self.manager.remove_role(user, 'sysadmin', project) - self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) - self.assertTrue(self.manager.has_role(user, 'sysadmin')) - - def test_can_retrieve_project_by_user(self): - with user_and_project_generator(self.manager) as (user, project): - self.assertEqual(1, len(self.manager.get_projects('test1'))) - - def test_can_modify_project(self): - with user_and_project_generator(self.manager): - with user_generator(self.manager, name='test2'): - self.manager.modify_project('testproj', 'test2', 'new desc') - project = self.manager.get_project('testproj') - self.assertEqual('test2', project.project_manager_id) - self.assertEqual('new desc', project.description) - - def test_can_delete_project(self): - with user_generator(self.manager): - self.manager.create_project('testproj', 'test1') - self.assert_(self.manager.get_project('testproj')) - self.manager.delete_project('testproj') - projectlist = self.manager.get_projects() - self.assert_(not filter(lambda p: p.name == 'testproj', - projectlist)) - - def test_can_delete_user(self): - self.manager.create_user('test1') - self.assert_(self.manager.get_user('test1')) - self.manager.delete_user('test1') - userlist = self.manager.get_users() - self.assert_(not filter(lambda u: u.id == 'test1', userlist)) - - def test_can_modify_users(self): - with user_generator(self.manager): - self.manager.modify_user('test1', 'access', 'secret', True) - user = self.manager.get_user('test1') - self.assertEqual('access', user.access) - self.assertEqual('secret', user.secret) - self.assertTrue(user.is_admin()) - - -class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): - auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' - - def __init__(self, *args, **kwargs): - AuthManagerTestCase.__init__(self) - test.TestCase.__init__(self, *args, **kwargs) - import nova.auth.fakeldap as fakeldap - FLAGS.redis_db = 8 - if FLAGS.flush_db: - logging.info("Flushing redis datastore") - try: - r = fakeldap.Redis.instance() - r.flushdb() - except: - self.skip = True - - -class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): - auth_driver = 'nova.auth.dbdriver.DbDriver' - - -if __name__ == "__main__": - # TODO: Implement use_fake 
as an option - unittest.main() diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py deleted file mode 100644 index 53a762310..000000000 --- a/nova/tests/cloud_unittest.py +++ /dev/null @@ -1,332 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from base64 import b64decode -import json -import logging -from M2Crypto import BIO -from M2Crypto import RSA -import os -import StringIO -import tempfile -import time - -from eventlet import greenthread -from xml.etree import ElementTree - -from nova import context -from nova import crypto -from nova import db -from nova import flags -from nova import rpc -from nova import test -from nova import utils -from nova.auth import manager -from nova.compute import power_state -from nova.api.ec2 import cloud -from nova.objectstore import image - - -FLAGS = flags.FLAGS - -# Temp dirs for working with image attributes through the cloud controller -# (stole this from objectstore_unittest.py) -OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-') -IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images') -os.makedirs(IMAGES_PATH) - - -class CloudTestCase(test.TestCase): - def setUp(self): - super(CloudTestCase, self).setUp() - self.flags(connection_type='fake', images_path=IMAGES_PATH) - - self.conn = rpc.Connection.instance() - logging.getLogger().setLevel(logging.DEBUG) - - # set up our cloud - self.cloud = cloud.CloudController() - - # set up a service - self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.compute_topic, - proxy=self.compute) - self.compute_consumer.attach_to_eventlet() - self.network = utils.import_object(FLAGS.network_manager) - self.network_consumer = rpc.AdapterConsumer(connection=self.conn, - topic=FLAGS.network_topic, - proxy=self.network) - self.network_consumer.attach_to_eventlet() - - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('proj', 'admin', 'proj') - self.context = context.RequestContext(user=self.user, - project=self.project) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(CloudTestCase, self).tearDown() - - def _create_key(self, name): - # NOTE(vish): create depends on pool, so just call helper directly - return cloud._gen_key(self.context, self.context.user.id, name) - - def test_describe_addresses(self): - """Makes sure describe addresses runs without raising an exception""" - address = "10.10.10.10" - db.floating_ip_create(self.context, - {'address': address, - 'host': FLAGS.host}) - self.cloud.allocate_address(self.context) - self.cloud.describe_addresses(self.context) - self.cloud.release_address(self.context, - public_ip=address) - 
greenthread.sleep(0.3) - db.floating_ip_destroy(self.context, address) - - def test_associate_disassociate_address(self): - """Verifies associate runs cleanly without raising an exception""" - address = "10.10.10.10" - db.floating_ip_create(self.context, - {'address': address, - 'host': FLAGS.host}) - self.cloud.allocate_address(self.context) - inst = db.instance_create(self.context, {}) - fixed = self.network.allocate_fixed_ip(self.context, inst['id']) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) - self.cloud.associate_address(self.context, - instance_id=ec2_id, - public_ip=address) - self.cloud.disassociate_address(self.context, - public_ip=address) - self.cloud.release_address(self.context, - public_ip=address) - greenthread.sleep(0.3) - self.network.deallocate_fixed_ip(self.context, fixed) - db.instance_destroy(self.context, inst['id']) - db.floating_ip_destroy(self.context, address) - - def test_describe_volumes(self): - """Makes sure describe_volumes works and filters results.""" - vol1 = db.volume_create(self.context, {}) - vol2 = db.volume_create(self.context, {}) - result = self.cloud.describe_volumes(self.context) - self.assertEqual(len(result['volumeSet']), 2) - result = self.cloud.describe_volumes(self.context, - volume_id=[vol2['ec2_id']]) - self.assertEqual(len(result['volumeSet']), 1) - self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id']) - db.volume_destroy(self.context, vol1['id']) - db.volume_destroy(self.context, vol2['id']) - - def test_console_output(self): - image_id = FLAGS.default_image - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': image_id, - 'instance_type': instance_type, - 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) - instance_id = rv['instancesSet'][0]['instanceId'] - output = yield self.cloud.get_console_output(context=self.context, - instance_id=[instance_id]) - self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') - # TODO(soren): We need this until we can stop polling in the rpc code - # for unit tests. 
- greenthread.sleep(0.3) - rv = yield self.cloud.terminate_instances(self.context, [instance_id]) - - def test_key_generation(self): - result = self._create_key('test') - private_key = result['private_key'] - key = RSA.load_key_string(private_key, callback=lambda: None) - bio = BIO.MemoryBuffer() - public_key = db.key_pair_get(self.context, - self.context.user.id, - 'test')['public_key'] - key.save_pub_key_bio(bio) - converted = crypto.ssl_pub_to_ssh_pub(bio.read()) - # assert key fields are equal - self.assertEqual(public_key.split(" ")[1].strip(), - converted.split(" ")[1].strip()) - - def test_describe_key_pairs(self): - self._create_key('test1') - self._create_key('test2') - result = self.cloud.describe_key_pairs(self.context) - keys = result["keypairsSet"] - self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) - self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) - - def test_delete_key_pair(self): - self._create_key('test') - self.cloud.delete_key_pair(self.context, 'test') - - def test_run_instances(self): - if FLAGS.connection_type == 'fake': - logging.debug("Can't test instances without a real virtual env.") - return - image_id = FLAGS.default_image - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': image_id, - 'instance_type': instance_type, - 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) - # TODO: check for proper response - instance_id = rv['reservationSet'][0].keys()[0] - instance = rv['reservationSet'][0][instance_id][0] - logging.debug("Need to watch instance %s until it's running..." % - instance['instance_id']) - while True: - greenthread.sleep(1) - info = self.cloud._get_instance(instance['instance_id']) - logging.debug(info['state']) - if info['state'] == power_state.RUNNING: - break - self.assert_(rv) - - if connection_type != 'fake': - time.sleep(45) # Should use boto for polling here - for reservations in rv['reservationSet']: - # for res_id in reservations.keys(): - # logging.debug(reservations[res_id]) - # for instance in reservations[res_id]: - for instance in reservations[reservations.keys()[0]]: - instance_id = instance['instance_id'] - logging.debug("Terminating instance %s" % instance_id) - rv = yield self.compute.terminate_instance(instance_id) - - def test_instance_update_state(self): - def instance(num): - return { - 'reservation_id': 'r-1', - 'instance_id': 'i-%s' % num, - 'image_id': 'ami-%s' % num, - 'private_dns_name': '10.0.0.%s' % num, - 'dns_name': '10.0.0%s' % num, - 'ami_launch_index': str(num), - 'instance_type': 'fake', - 'availability_zone': 'fake', - 'key_name': None, - 'kernel_id': 'fake', - 'ramdisk_id': 'fake', - 'groups': ['default'], - 'product_codes': None, - 'state': 0x01, - 'user_data': ''} - rv = self.cloud._format_describe_instances(self.context) - self.assert_(len(rv['reservationSet']) == 0) - - # simulate launch of 5 instances - # self.cloud.instances['pending'] = {} - #for i in xrange(5): - # inst = instance(i) - # self.cloud.instances['pending'][inst['instance_id']] = inst - - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - # report 4 nodes each having 1 of the instances - #for i in xrange(4): - # self.cloud.update_state('instances', - # {('node-%s' % i): {('i-%s' % i): - # instance(i)}}) - - # one instance should be pending still - #self.assert_(len(self.cloud.instances['pending'].keys()) == 1) - - # check that the 
reservations collapse - #rv = self.cloud._format_instances(self.admin) - #self.assert_(len(rv['reservationSet']) == 1) - #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) - - # check that we can get metadata for each instance - #for i in xrange(4): - # data = self.cloud.get_metadata(instance(i)['private_dns_name']) - # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i) - - @staticmethod - def _fake_set_image_description(ctxt, image_id, description): - from nova.objectstore import handler - - class req: - pass - - request = req() - request.context = ctxt - request.args = {'image_id': [image_id], - 'description': [description]} - - resource = handler.ImagesResource() - resource.render_POST(request) - - def test_user_editable_image_endpoint(self): - pathdir = os.path.join(FLAGS.images_path, 'ami-testing') - os.mkdir(pathdir) - info = {'isPublic': False} - with open(os.path.join(pathdir, 'info.json'), 'w') as f: - json.dump(info, f) - img = image.Image('ami-testing') - # self.cloud.set_image_description(self.context, 'ami-testing', - # 'Foo Img') - # NOTE(vish): Above won't work unless we start objectstore or create - # a fake version of api/ec2/images.py conn that can - # call methods directly instead of going through boto. - # for now, just cheat and call the method directly - self._fake_set_image_description(self.context, 'ami-testing', - 'Foo Img') - self.assertEqual('Foo Img', img.metadata['description']) - self._fake_set_image_description(self.context, 'ami-testing', '') - self.assertEqual('', img.metadata['description']) - - def test_update_of_instance_display_fields(self): - inst = db.instance_create(self.context, {}) - ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) - self.cloud.update_instance(self.context, ec2_id, - display_name='c00l 1m4g3') - inst = db.instance_get(self.context, inst['id']) - self.assertEqual('c00l 1m4g3', inst['display_name']) - db.instance_destroy(self.context, inst['id']) - - def test_update_of_instance_wont_update_private_fields(self): - inst = db.instance_create(self.context, {}) - self.cloud.update_instance(self.context, inst['id'], - mac_address='DE:AD:BE:EF') - inst = db.instance_get(self.context, inst['id']) - self.assertEqual(None, inst['mac_address']) - db.instance_destroy(self.context, inst['id']) - - def test_update_of_volume_display_fields(self): - vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, vol['id'], - display_name='c00l v0lum3') - vol = db.volume_get(self.context, vol['id']) - self.assertEqual('c00l v0lum3', vol['display_name']) - db.volume_destroy(self.context, vol['id']) - - def test_update_of_volume_wont_update_private_fields(self): - vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, vol['id'], - mountpoint='/not/here') - vol = db.volume_get(self.context, vol['id']) - self.assertEqual(None, vol['mountpoint']) - db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py deleted file mode 100644 index c6353d357..000000000 --- a/nova/tests/compute_unittest.py +++ /dev/null @@ -1,155 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Compute -""" - -import datetime -import logging - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import test -from nova import utils -from nova.auth import manager -from nova.compute import api as compute_api - - -FLAGS = flags.FLAGS - - -class ComputeTestCase(test.TestCase): - """Test case for compute""" - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(ComputeTestCase, self).setUp() - self.flags(connection_type='fake', - network_manager='nova.network.manager.FlatManager') - self.compute = utils.import_object(FLAGS.compute_manager) - self.compute_api = compute_api.ComputeAPI() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(ComputeTestCase, self).tearDown() - - def _create_instance(self): - """Create a test instance""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['launch_time'] = '10' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.tiny' - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - return db.instance_create(self.context, inst)['id'] - - def test_create_instance_defaults_display_name(self): - """Verify that an instance cannot be created without a display_name.""" - cases = [dict(), dict(display_name=None)] - for instance in cases: - ref = self.compute_api.create_instances(self.context, - FLAGS.default_instance_type, None, **instance) - try: - self.assertNotEqual(ref[0].display_name, None) - finally: - db.instance_destroy(self.context, ref[0]['id']) - - def test_create_instance_associates_security_groups(self): - """Make sure create_instances associates security groups""" - values = {'name': 'default', - 'description': 'default', - 'user_id': self.user.id, - 'project_id': self.project.id} - group = db.security_group_create(self.context, values) - ref = self.compute_api.create_instances(self.context, - FLAGS.default_instance_type, None, security_group=['default']) - try: - self.assertEqual(len(ref[0]['security_groups']), 1) - finally: - db.security_group_destroy(self.context, group['id']) - db.instance_destroy(self.context, ref[0]['id']) - - def test_run_terminate(self): - """Make sure it is possible to run and terminate instance""" - instance_id = self._create_instance() - - self.compute.run_instance(self.context, instance_id) - - instances = db.instance_get_all(context.get_admin_context()) - logging.info("Running instances: %s", instances) - self.assertEqual(len(instances), 1) - - self.compute.terminate_instance(self.context, instance_id) - - instances = db.instance_get_all(context.get_admin_context()) - logging.info("After terminating instances: %s", instances) - self.assertEqual(len(instances), 0) - - def test_run_terminate_timestamps(self): - """Make sure timestamps are 
set for launched and destroyed""" - instance_id = self._create_instance() - instance_ref = db.instance_get(self.context, instance_id) - self.assertEqual(instance_ref['launched_at'], None) - self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() - self.compute.run_instance(self.context, instance_id) - instance_ref = db.instance_get(self.context, instance_id) - self.assert_(instance_ref['launched_at'] > launch) - self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() - self.compute.terminate_instance(self.context, instance_id) - self.context = self.context.elevated(True) - instance_ref = db.instance_get(self.context, instance_id) - self.assert_(instance_ref['launched_at'] < terminate) - self.assert_(instance_ref['deleted_at'] > terminate) - - def test_reboot(self): - """Ensure instance can be rebooted""" - instance_id = self._create_instance() - self.compute.run_instance(self.context, instance_id) - self.compute.reboot_instance(self.context, instance_id) - self.compute.terminate_instance(self.context, instance_id) - - def test_console_output(self): - """Make sure we can get console output from instance""" - instance_id = self._create_instance() - self.compute.run_instance(self.context, instance_id) - - console = self.compute.get_console_output(self.context, - instance_id) - self.assert_(console) - self.compute.terminate_instance(self.context, instance_id) - - def test_run_instance_existing(self): - """Ensure failure when running an instance that already exists""" - instance_id = self._create_instance() - self.compute.run_instance(self.context, instance_id) - self.assertRaises(exception.Error, - self.compute.run_instance, - self.context, - instance_id) - self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/flags_unittest.py b/nova/tests/flags_unittest.py deleted file mode 100644 index 707300fcf..000000000 --- a/nova/tests/flags_unittest.py +++ /dev/null @@ -1,102 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import exception -from nova import flags -from nova import test - -FLAGS = flags.FLAGS -flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only') - - -class FlagsTestCase(test.TestCase): - - def setUp(self): - super(FlagsTestCase, self).setUp() - self.FLAGS = flags.FlagValues() - self.global_FLAGS = flags.FLAGS - - def test_define(self): - self.assert_('string' not in self.FLAGS) - self.assert_('int' not in self.FLAGS) - self.assert_('false' not in self.FLAGS) - self.assert_('true' not in self.FLAGS) - - flags.DEFINE_string('string', 'default', 'desc', - flag_values=self.FLAGS) - flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS) - flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS) - flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS) - - self.assert_(self.FLAGS['string']) - self.assert_(self.FLAGS['int']) - self.assert_(self.FLAGS['false']) - self.assert_(self.FLAGS['true']) - self.assertEqual(self.FLAGS.string, 'default') - self.assertEqual(self.FLAGS.int, 1) - self.assertEqual(self.FLAGS.false, False) - self.assertEqual(self.FLAGS.true, True) - - argv = ['flags_test', - '--string', 'foo', - '--int', '2', - '--false', - '--notrue'] - - self.FLAGS(argv) - self.assertEqual(self.FLAGS.string, 'foo') - self.assertEqual(self.FLAGS.int, 2) - self.assertEqual(self.FLAGS.false, True) - self.assertEqual(self.FLAGS.true, False) - - def test_declare(self): - self.assert_('answer' not in self.global_FLAGS) - flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assert_('answer' in self.global_FLAGS) - self.assertEqual(self.global_FLAGS.answer, 42) - - # Make sure we don't overwrite anything - self.global_FLAGS.answer = 256 - self.assertEqual(self.global_FLAGS.answer, 256) - flags.DECLARE('answer', 'nova.tests.declare_flags') - self.assertEqual(self.global_FLAGS.answer, 256) - - def test_runtime_and_unknown_flags(self): - self.assert_('runtime_answer' not in self.global_FLAGS) - - argv = ['flags_test', '--runtime_answer=60', 'extra_arg'] - args = self.global_FLAGS(argv) - self.assertEqual(len(args), 2) - self.assertEqual(args[1], 'extra_arg') - - self.assert_('runtime_answer' not in self.global_FLAGS) - - import nova.tests.runtime_flags - - self.assert_('runtime_answer' in self.global_FLAGS) - self.assertEqual(self.global_FLAGS.runtime_answer, 60) - - def test_flag_leak_left(self): - self.assertEqual(FLAGS.flags_unittest, 'foo') - FLAGS.flags_unittest = 'bar' - self.assertEqual(FLAGS.flags_unittest, 'bar') - - def test_flag_leak_right(self): - self.assertEqual(FLAGS.flags_unittest, 'foo') - FLAGS.flags_unittest = 'bar' - self.assertEqual(FLAGS.flags_unittest, 'bar') diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py deleted file mode 100644 index 3d947427a..000000000 --- a/nova/tests/misc_unittest.py +++ /dev/null @@ -1,55 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from nova import test -from nova.utils import parse_mailmap, str_dict_replace - - -class ProjectTestCase(test.TestCase): - def test_authors_up_to_date(self): - if os.path.exists('../.bzr'): - contributors = set() - - mailmap = parse_mailmap('../.mailmap') - - import bzrlib.workingtree - tree = bzrlib.workingtree.WorkingTree.open('..') - tree.lock_read() - try: - parents = tree.get_parent_ids() - g = tree.branch.repository.get_graph() - for p in parents[1:]: - rev_ids = [r for r, _ in g.iter_ancestry(parents) - if r != "null:"] - revs = tree.branch.repository.get_revisions(rev_ids) - for r in revs: - for author in r.get_apparent_authors(): - email = author.split(' ')[-1] - contributors.add(str_dict_replace(email, mailmap)) - - authors_file = open('../Authors', 'r').read() - - missing = set() - for contributor in contributors: - if not contributor in authors_file: - missing.add(contributor) - - self.assertTrue(len(missing) == 0, - '%r not listed in Authors' % missing) - finally: - tree.unlock() diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py deleted file mode 100644 index 8cf2a5e54..000000000 --- a/nova/tests/quota_unittest.py +++ /dev/null @@ -1,153 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import quota -from nova import test -from nova import utils -from nova.auth import manager -from nova.api.ec2 import cloud - - -FLAGS = flags.FLAGS - - -class QuotaTestCase(test.TestCase): - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(QuotaTestCase, self).setUp() - self.flags(connection_type='fake', - quota_instances=2, - quota_cores=4, - quota_volumes=2, - quota_gigabytes=20, - quota_floating_ips=1) - - self.cloud = cloud.CloudController() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('admin', 'admin', 'admin') - self.network = utils.import_object(FLAGS.network_manager) - self.context = context.RequestContext(project=self.project, - user=self.user) - - def tearDown(self): - manager.AuthManager().delete_project(self.project) - manager.AuthManager().delete_user(self.user) - super(QuotaTestCase, self).tearDown() - - def _create_instance(self, cores=2): - """Create a test instance""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.large' - inst['vcpus'] = cores - inst['mac_address'] = utils.generate_mac() - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self, size=10): - """Create a test volume""" - vol = {} - vol['user_id'] = self.user.id - vol['project_id'] = self.project.id - vol['size'] = size - return db.volume_create(self.context, vol)['id'] - - def test_quota_overrides(self): - """Make sure overriding a projects quotas works""" - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') - self.assertEqual(num_instances, 2) - db.quota_create(self.context, {'project_id': self.project.id, - 'instances': 10}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') - self.assertEqual(num_instances, 4) - db.quota_update(self.context, self.project.id, {'cores': 100}) - num_instances = quota.allowed_instances(self.context, 100, 'm1.small') - self.assertEqual(num_instances, 10) - db.quota_destroy(self.context, self.project.id) - - def test_too_many_instances(self): - instance_ids = [] - for i in range(FLAGS.quota_instances): - instance_id = self._create_instance() - instance_ids.append(instance_id) - self.assertRaises(quota.QuotaError, self.cloud.run_instances, - self.context, - min_count=1, - max_count=1, - instance_type='m1.small', - image_id='fake') - for instance_id in instance_ids: - db.instance_destroy(self.context, instance_id) - - def test_too_many_cores(self): - instance_ids = [] - instance_id = self._create_instance(cores=4) - instance_ids.append(instance_id) - self.assertRaises(quota.QuotaError, self.cloud.run_instances, - self.context, - min_count=1, - max_count=1, - instance_type='m1.small', - image_id='fake') - for instance_id in instance_ids: - db.instance_destroy(self.context, instance_id) - - def test_too_many_volumes(self): - volume_ids = [] - for i in range(FLAGS.quota_volumes): - volume_id = self._create_volume() - volume_ids.append(volume_id) - self.assertRaises(quota.QuotaError, self.cloud.create_volume, - self.context, - size=10) - for volume_id in volume_ids: - db.volume_destroy(self.context, volume_id) - - def test_too_many_gigabytes(self): - volume_ids = [] - volume_id = self._create_volume(size=20) - 
volume_ids.append(volume_id) - self.assertRaises(quota.QuotaError, - self.cloud.create_volume, - self.context, - size=10) - for volume_id in volume_ids: - db.volume_destroy(self.context, volume_id) - - def test_too_many_addresses(self): - address = '192.168.0.100' - db.floating_ip_create(context.get_admin_context(), - {'address': address, 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.project.id) - # NOTE(vish): This assert never fails. When cloud attempts to - # make an rpc.call, the test just finishes with OK. It - # appears to be something in the magic inline callbacks - # that is breaking. - self.assertRaises(quota.QuotaError, self.cloud.allocate_address, - self.context) - db.floating_ip_destroy(context.get_admin_context(), address) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py deleted file mode 100644 index a2495e65a..000000000 --- a/nova/tests/rpc_unittest.py +++ /dev/null @@ -1,103 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for remote procedure calls using queue -""" -import logging - -from nova import context -from nova import flags -from nova import rpc -from nova import test - - -FLAGS = flags.FLAGS - - -class RpcTestCase(test.TestCase): - """Test cases for rpc""" - def setUp(self): - super(RpcTestCase, self).setUp() - self.conn = rpc.Connection.instance() - self.receiver = TestReceiver() - self.consumer = rpc.AdapterConsumer(connection=self.conn, - topic='test', - proxy=self.receiver) - self.consumer.attach_to_eventlet() - self.context = context.get_admin_context() - - def test_call_succeed(self): - """Get a value through rpc call""" - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. 
- """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call - - Uses static methods because we aren't actually storing any state""" - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in""" - logging.debug("Received %s", value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context""" - logging.debug("Received %s", context) - return context.to_dict() - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in""" - raise Exception(value) diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py deleted file mode 100644 index d1756b8fb..000000000 --- a/nova/tests/scheduler_unittest.py +++ /dev/null @@ -1,246 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Tests For Scheduler -""" - -from nova import context -from nova import db -from nova import flags -from nova import service -from nova import test -from nova import rpc -from nova import utils -from nova.auth import manager as auth_manager -from nova.scheduler import manager -from nova.scheduler import driver - - -FLAGS = flags.FLAGS -flags.DECLARE('max_cores', 'nova.scheduler.simple') - - -class TestDriver(driver.Scheduler): - """Scheduler Driver for Tests""" - def schedule(context, topic, *args, **kwargs): - return 'fallback_host' - - def schedule_named_method(context, topic, num): - return 'named_host' - - -class SchedulerTestCase(test.TestCase): - """Test case for scheduler""" - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.scheduler_unittest.TestDriver') - - def test_fallback(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.fallback_host', - {'method': 'noexist', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.noexist(ctxt, 'topic', num=7) - - def test_named_method(self): - scheduler = manager.SchedulerManager() - self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - ctxt = context.get_admin_context() - rpc.cast(ctxt, - 'topic.named_host', - {'method': 'named_method', - 'args': {'num': 7}}) - self.mox.ReplayAll() - scheduler.named_method(ctxt, 'topic', num=7) - - -class SimpleDriverTestCase(test.TestCase): - """Test case for simple driver""" - def setUp(self): - super(SimpleDriverTestCase, self).setUp() - self.flags(connection_type='fake', - max_cores=4, - max_gigabytes=4, - network_manager='nova.network.manager.FlatManager', - volume_driver='nova.volume.driver.FakeISCSIDriver', - scheduler_driver='nova.scheduler.simple.SimpleScheduler') - self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - - def _create_instance(self): - """Create a test instance""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.tiny' - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - inst['vcpus'] = 1 - return db.instance_create(self.context, inst)['id'] - - def _create_volume(self): - """Create a test volume""" - vol = {} - vol['image_id'] = 'ami-test' - vol['reservation_id'] = 'r-fakeres' - vol['size'] = 1 - return db.volume_create(self.context, vol)['id'] - - def test_hosts_are_up(self): - """Ensures driver can find the hosts that are up""" - # NOTE(vish): constructing service without create method - # because we are going to use it without queue - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - hosts = self.scheduler.driver.hosts_up(self.context, 'compute') - self.assertEqual(len(hosts), 2) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_instance(self): - """Ensures the host with less cores gets the next one""" - compute1 = 
service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_id1 = self._create_instance() - compute1.run_instance(self.context, instance_id1) - instance_id2 = self._create_instance() - host = self.scheduler.driver.schedule_run_instance(self.context, - instance_id2) - self.assertEqual(host, 'host2') - compute1.terminate_instance(self.context, instance_id1) - db.instance_destroy(self.context, instance_id2) - compute1.kill() - compute2.kill() - - def test_too_many_cores(self): - """Ensures we don't go over max cores""" - compute1 = service.Service('host1', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute1.start() - compute2 = service.Service('host2', - 'nova-compute', - 'compute', - FLAGS.compute_manager) - compute2.start() - instance_ids1 = [] - instance_ids2 = [] - for index in xrange(FLAGS.max_cores): - instance_id = self._create_instance() - compute1.run_instance(self.context, instance_id) - instance_ids1.append(instance_id) - instance_id = self._create_instance() - compute2.run_instance(self.context, instance_id) - instance_ids2.append(instance_id) - instance_id = self._create_instance() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_run_instance, - self.context, - instance_id) - for instance_id in instance_ids1: - compute1.terminate_instance(self.context, instance_id) - for instance_id in instance_ids2: - compute2.terminate_instance(self.context, instance_id) - compute1.kill() - compute2.kill() - - def test_least_busy_host_gets_volume(self): - """Ensures the host with less gigabytes gets the next one""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_id1 = self._create_volume() - volume1.create_volume(self.context, volume_id1) - volume_id2 = self._create_volume() - host = self.scheduler.driver.schedule_create_volume(self.context, - volume_id2) - self.assertEqual(host, 'host2') - volume1.delete_volume(self.context, volume_id1) - db.volume_destroy(self.context, volume_id2) - volume1.kill() - volume2.kill() - - def test_too_many_gigabytes(self): - """Ensures we don't go over max gigabytes""" - volume1 = service.Service('host1', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume1.start() - volume2 = service.Service('host2', - 'nova-volume', - 'volume', - FLAGS.volume_manager) - volume2.start() - volume_ids1 = [] - volume_ids2 = [] - for index in xrange(FLAGS.max_gigabytes): - volume_id = self._create_volume() - volume1.create_volume(self.context, volume_id) - volume_ids1.append(volume_id) - volume_id = self._create_volume() - volume2.create_volume(self.context, volume_id) - volume_ids2.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(driver.NoValidHost, - self.scheduler.driver.schedule_create_volume, - self.context, - volume_id) - for volume_id in volume_ids1: - volume1.delete_volume(self.context, volume_id) - for volume_id in volume_ids2: - volume2.delete_volume(self.context, volume_id) - volume1.kill() - volume2.kill() diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py deleted file mode 100644 index 47c092f8e..000000000 --- a/nova/tests/service_unittest.py +++ /dev/null @@ -1,227 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 
United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit Tests for remote procedure calls using queue -""" - -import mox - -from nova import exception -from nova import flags -from nova import rpc -from nova import test -from nova import service -from nova import manager - -FLAGS = flags.FLAGS -flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager", - "Manager for testing") - - -class FakeManager(manager.Manager): - """Fake manager for tests""" - def test_method(self): - return 'manager' - - -class ExtendedService(service.Service): - def test_method(self): - return 'service' - - -class ServiceManagerTestCase(test.TestCase): - """Test cases for Services""" - - def test_attribute_error_for_no_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'nova.tests.service_unittest.FakeManager') - self.assertRaises(AttributeError, getattr, serv, 'test_method') - - def test_message_gets_to_manager(self): - serv = service.Service('test', - 'test', - 'test', - 'nova.tests.service_unittest.FakeManager') - serv.start() - self.assertEqual(serv.test_method(), 'manager') - - def test_override_manager_method(self): - serv = ExtendedService('test', - 'test', - 'test', - 'nova.tests.service_unittest.FakeManager') - serv.start() - self.assertEqual(serv.test_method(), 'service') - - -class ServiceTestCase(test.TestCase): - """Test cases for Services""" - - def setUp(self): - super(ServiceTestCase, self).setUp() - self.mox.StubOutWithMock(service, 'db') - - def test_create(self): - host = 'foo' - binary = 'nova-fake' - topic = 'fake' - - # NOTE(vish): Create was moved out of mox replay to make sure that - # the looping calls are created in StartService. 
- app = service.Service.create(host=host, binary=binary) - - self.mox.StubOutWithMock(rpc, - 'AdapterConsumer', - use_mock_anything=True) - rpc.AdapterConsumer(connection=mox.IgnoreArg(), - topic=topic, - proxy=mox.IsA(service.Service)).AndReturn( - rpc.AdapterConsumer) - - rpc.AdapterConsumer(connection=mox.IgnoreArg(), - topic='%s.%s' % (topic, host), - proxy=mox.IsA(service.Service)).AndReturn( - rpc.AdapterConsumer) - - rpc.AdapterConsumer.attach_to_eventlet() - rpc.AdapterConsumer.attach_to_eventlet() - - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - self.mox.ReplayAll() - - app.start() - app.stop() - self.assert_(app) - - # We're testing sort of weird behavior in how report_state decides - # whether it is disconnected, it looks for a variable on itself called - # 'model_disconnected' and report_state doesn't really do much so this - # these are mostly just for coverage - def test_report_state_no_service(self): - host = 'foo' - binary = 'bar' - topic = 'test' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - service.db.service_get(mox.IgnoreArg(), - service_ref['id']).AndReturn(service_ref) - service.db.service_update(mox.IgnoreArg(), service_ref['id'], - mox.ContainsKeyValue('report_count', 1)) - - self.mox.ReplayAll() - serv = service.Service(host, - binary, - topic, - 'nova.tests.service_unittest.FakeManager') - serv.start() - serv.report_state() - - def test_report_state_newly_disconnected(self): - host = 'foo' - binary = 'bar' - topic = 'test' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - service.db.service_get(mox.IgnoreArg(), - mox.IgnoreArg()).AndRaise(Exception()) - - self.mox.ReplayAll() - serv = service.Service(host, - binary, - topic, - 'nova.tests.service_unittest.FakeManager') - serv.start() - serv.report_state() - self.assert_(serv.model_disconnected) - - def test_report_state_newly_connected(self): - host = 'foo' - binary = 'bar' - topic = 'test' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - service.db.service_get(mox.IgnoreArg(), - service_ref['id']).AndReturn(service_ref) - service.db.service_update(mox.IgnoreArg(), service_ref['id'], - mox.ContainsKeyValue('report_count', 1)) - - self.mox.ReplayAll() - serv = service.Service(host, - binary, - topic, - 
'nova.tests.service_unittest.FakeManager') - serv.start() - serv.model_disconnected = True - serv.report_state() - - self.assert_(not serv.model_disconnected) diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py new file mode 100644 index 000000000..58fdea3b5 --- /dev/null +++ b/nova/tests/test_access.py @@ -0,0 +1,127 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import unittest +import logging +import webob + +from nova import context +from nova import exception +from nova import flags +from nova import test +from nova.api import ec2 +from nova.auth import manager + + +FLAGS = flags.FLAGS + + +class Context(object): + pass + + +class AccessTestCase(test.TestCase): + def setUp(self): + super(AccessTestCase, self).setUp() + um = manager.AuthManager() + self.context = context.get_admin_context() + # Make test users + self.testadmin = um.create_user('testadmin') + self.testpmsys = um.create_user('testpmsys') + self.testnet = um.create_user('testnet') + self.testsys = um.create_user('testsys') + # Assign some rules + um.add_role('testadmin', 'cloudadmin') + um.add_role('testpmsys', 'sysadmin') + um.add_role('testnet', 'netadmin') + um.add_role('testsys', 'sysadmin') + + # Make a test project + self.project = um.create_project('testproj', + 'testpmsys', + 'a test project', + ['testpmsys', 'testnet', 'testsys']) + self.project.add_role(self.testnet, 'netadmin') + self.project.add_role(self.testsys, 'sysadmin') + #user is set in each test + + def noopWSGIApp(environ, start_response): + start_response('200 OK', []) + return [''] + + self.mw = ec2.Authorizer(noopWSGIApp) + self.mw.action_roles = {'str': { + '_allow_all': ['all'], + '_allow_none': [], + '_allow_project_manager': ['projectmanager'], + '_allow_sys_and_net': ['sysadmin', 'netadmin'], + '_allow_sysadmin': ['sysadmin']}} + + def tearDown(self): + um = manager.AuthManager() + # Delete the test project + um.delete_project('testproj') + # Delete the test user + um.delete_user('testadmin') + um.delete_user('testpmsys') + um.delete_user('testnet') + um.delete_user('testsys') + super(AccessTestCase, self).tearDown() + + def response_status(self, user, methodName): + ctxt = context.RequestContext(user, self.project) + environ = {'ec2.context': ctxt, + 'ec2.controller': 'some string', + 'ec2.action': methodName} + req = webob.Request.blank('/', environ) + resp = req.get_response(self.mw) + return resp.status_int + + def shouldAllow(self, user, methodName): + self.assertEqual(200, self.response_status(user, methodName)) + + def shouldDeny(self, user, methodName): + self.assertEqual(401, self.response_status(user, methodName)) + + def test_001_allow_all(self): + users = [self.testadmin, self.testpmsys, self.testnet, self.testsys] + for user in users: + self.shouldAllow(user, '_allow_all') + + def test_002_allow_none(self): + 
self.shouldAllow(self.testadmin, '_allow_none') + users = [self.testpmsys, self.testnet, self.testsys] + for user in users: + self.shouldDeny(user, '_allow_none') + + def test_003_allow_project_manager(self): + for user in [self.testadmin, self.testpmsys]: + self.shouldAllow(user, '_allow_project_manager') + for user in [self.testnet, self.testsys]: + self.shouldDeny(user, '_allow_project_manager') + + def test_004_allow_sys_and_net(self): + for user in [self.testadmin, self.testnet, self.testsys]: + self.shouldAllow(user, '_allow_sys_and_net') + # denied because it doesn't have the per project sysadmin + for user in [self.testpmsys]: + self.shouldDeny(user, '_allow_sys_and_net') + +if __name__ == "__main__": + # TODO: Implement use_fake as an option + unittest.main() diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py new file mode 100644 index 000000000..33d4cb294 --- /dev/null +++ b/nova/tests/test_api.py @@ -0,0 +1,338 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for the API endpoint""" + +import boto +from boto.ec2 import regioninfo +import httplib +import random +import StringIO +import webob + +from nova import context +from nova import flags +from nova import test +from nova import api +from nova.api.ec2 import cloud +from nova.api.ec2 import apirequest +from nova.auth import manager + + +class FakeHttplibSocket(object): + """a fake socket implementation for httplib.HTTPResponse, trivial""" + def __init__(self, response_string): + self._buffer = StringIO.StringIO(response_string) + + def makefile(self, _mode, _other): + """Returns the socket's internal buffer""" + return self._buffer + + +class FakeHttplibConnection(object): + """A fake httplib.HTTPConnection for boto to use + + requests made via this connection actually get translated and routed into + our WSGI app, we then wait for the response and turn it back into + the httplib.HTTPResponse that boto expects. + """ + def __init__(self, app, host, is_secure=False): + self.app = app + self.host = host + + def request(self, method, path, data, headers): + req = webob.Request.blank(path) + req.method = method + req.body = data + req.headers = headers + req.headers['Accept'] = 'text/html' + req.host = self.host + # Call the WSGI app, get the HTTP response + resp = str(req.get_response(self.app)) + # For some reason, the response doesn't have "HTTP/1.0 " prepended; I + # guess that's a function the web server usually provides. 
+ resp = "HTTP/1.0 %s" % resp + sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(sock) + self.http_response.begin() + + def getresponse(self): + return self.http_response + + def close(self): + """Required for compatibility with boto/tornado""" + pass + + +class XmlConversionTestCase(test.TrialTestCase): + """Unit test api xml conversion""" + def test_number_conversion(self): + conv = apirequest._try_convert + self.assertEqual(conv('None'), None) + self.assertEqual(conv('True'), True) + self.assertEqual(conv('False'), False) + self.assertEqual(conv('0'), 0) + self.assertEqual(conv('42'), 42) + self.assertEqual(conv('3.14'), 3.14) + self.assertEqual(conv('-57.12'), -57.12) + self.assertEqual(conv('0x57'), 0x57) + self.assertEqual(conv('-0x57'), -0x57) + self.assertEqual(conv('-'), '-') + self.assertEqual(conv('-0'), 0) + + +class ApiEc2TestCase(test.TrialTestCase): + """Unit test for the cloud controller on an EC2 API""" + def setUp(self): + super(ApiEc2TestCase, self).setUp() + + self.manager = manager.AuthManager() + + self.host = '127.0.0.1' + + self.app = api.API('ec2') + + def expect_http(self, host=None, is_secure=False): + """Returns a new EC2 connection""" + self.ec2 = boto.connect_ec2( + aws_access_key_id='fake', + aws_secret_access_key='fake', + is_secure=False, + region=regioninfo.RegionInfo(None, 'test', self.host), + port=8773, + path='/services/Cloud') + + self.mox.StubOutWithMock(self.ec2, 'new_http_connection') + http = FakeHttplibConnection( + self.app, '%s:8773' % (self.host), False) + # pylint: disable-msg=E1103 + self.ec2.new_http_connection(host, is_secure).AndReturn(http) + return http + + def test_describe_instances(self): + """Test that, after creating a user and a project, the describe + instances call to the API works properly""" + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + self.assertEqual(self.ec2.get_all_instances(), []) + self.manager.delete_project(project) + self.manager.delete_user(user) + + def test_get_all_key_pairs(self): + """Test that, after creating a user and project and generating + a key pair, that the API call to list key pairs works properly""" + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + # NOTE(vish): create depends on pool, so call helper directly + cloud._gen_key(context.get_admin_context(), user.id, keyname) + + rv = self.ec2.get_all_key_pairs() + results = [k for k in rv if k.name == keyname] + self.assertEquals(len(results), 1) + self.manager.delete_project(project) + self.manager.delete_user(user) + + def test_get_all_security_groups(self): + """Test that we can retrieve security groups""" + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake', admin=True) + project = self.manager.create_project('fake', 'fake', 'fake') + + rv = self.ec2.get_all_security_groups() + + self.assertEquals(len(rv), 1) + self.assertEquals(rv[0].name, 'default') + + self.manager.delete_project(project) + self.manager.delete_user(user) + + def test_create_delete_security_group(self): + """Test that we can create a security group""" + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake', admin=True) + project 
= self.manager.create_project('fake', 'fake', 'fake') + + # At the moment, you need both of these to actually be netadmin + self.manager.add_role('fake', 'netadmin') + project.add_role('fake', 'netadmin') + + security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") + for x in range(random.randint(4, 8))) + + self.ec2.create_security_group(security_group_name, 'test group') + + self.expect_http() + self.mox.ReplayAll() + + rv = self.ec2.get_all_security_groups() + self.assertEquals(len(rv), 2) + self.assertTrue(security_group_name in [group.name for group in rv]) + + self.expect_http() + self.mox.ReplayAll() + + self.ec2.delete_security_group(security_group_name) + + self.manager.delete_project(project) + self.manager.delete_user(user) + + def test_authorize_revoke_security_group_cidr(self): + """ + Test that we can add and remove CIDR based rules + to a security group + """ + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + + # At the moment, you need both of these to actually be netadmin + self.manager.add_role('fake', 'netadmin') + project.add_role('fake', 'netadmin') + + security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") + for x in range(random.randint(4, 8))) + + group = self.ec2.create_security_group(security_group_name, + 'test group') + + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + + group.authorize('tcp', 80, 81, '0.0.0.0/0') + + self.expect_http() + self.mox.ReplayAll() + + rv = self.ec2.get_all_security_groups() + # I don't bother checkng that we actually find it here, + # because the create/delete unit test further up should + # be good enough for that. + for group in rv: + if group.name == security_group_name: + self.assertEquals(len(group.rules), 1) + self.assertEquals(int(group.rules[0].from_port), 80) + self.assertEquals(int(group.rules[0].to_port), 81) + self.assertEquals(len(group.rules[0].grants), 1) + self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0') + + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + + group.revoke('tcp', 80, 81, '0.0.0.0/0') + + self.expect_http() + self.mox.ReplayAll() + + self.ec2.delete_security_group(security_group_name) + + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + + rv = self.ec2.get_all_security_groups() + + self.assertEqual(len(rv), 1) + self.assertEqual(rv[0].name, 'default') + + self.manager.delete_project(project) + self.manager.delete_user(user) + + return + + def test_authorize_revoke_security_group_foreign_group(self): + """ + Test that we can grant and revoke another security group access + to a security group + """ + self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake', admin=True) + project = self.manager.create_project('fake', 'fake', 'fake') + + # At the moment, you need both of these to actually be netadmin + self.manager.add_role('fake', 'netadmin') + project.add_role('fake', 'netadmin') + + rand_string = 'sdiuisudfsdcnpaqwertasd' + security_group_name = "".join(random.choice(rand_string) + for x in range(random.randint(4, 8))) + other_security_group_name = "".join(random.choice(rand_string) + for x in range(random.randint(4, 8))) + + group = self.ec2.create_security_group(security_group_name, + 'test group') + + self.expect_http() + self.mox.ReplayAll() + + other_group = self.ec2.create_security_group(other_security_group_name, + 'some 
other group') + + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + + group.authorize(src_group=other_group) + + self.expect_http() + self.mox.ReplayAll() + + rv = self.ec2.get_all_security_groups() + + # I don't bother checkng that we actually find it here, + # because the create/delete unit test further up should + # be good enough for that. + for group in rv: + if group.name == security_group_name: + self.assertEquals(len(group.rules), 1) + self.assertEquals(len(group.rules[0].grants), 1) + self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' % + (other_security_group_name, 'fake')) + + self.expect_http() + self.mox.ReplayAll() + + rv = self.ec2.get_all_security_groups() + + for group in rv: + if group.name == security_group_name: + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + group.revoke(src_group=other_group) + + self.expect_http() + self.mox.ReplayAll() + + self.ec2.delete_security_group(security_group_name) + + self.manager.delete_project(project) + self.manager.delete_user(user) + + return diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py new file mode 100644 index 000000000..4508d6721 --- /dev/null +++ b/nova/tests/test_auth.py @@ -0,0 +1,352 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging +from M2Crypto import X509 +import unittest + +from nova import crypto +from nova import flags +from nova import test +from nova.auth import manager +from nova.api.ec2 import cloud + +FLAGS = flags.FLAGS + + +class user_generator(object): + def __init__(self, manager, **user_state): + if 'name' not in user_state: + user_state['name'] = 'test1' + self.manager = manager + self.user = manager.create_user(**user_state) + + def __enter__(self): + return self.user + + def __exit__(self, value, type, trace): + self.manager.delete_user(self.user) + + +class project_generator(object): + def __init__(self, manager, **project_state): + if 'name' not in project_state: + project_state['name'] = 'testproj' + if 'manager_user' not in project_state: + project_state['manager_user'] = 'test1' + self.manager = manager + self.project = manager.create_project(**project_state) + + def __enter__(self): + return self.project + + def __exit__(self, value, type, trace): + self.manager.delete_project(self.project) + + +class user_and_project_generator(object): + def __init__(self, manager, user_state={}, project_state={}): + self.manager = manager + if 'name' not in user_state: + user_state['name'] = 'test1' + if 'name' not in project_state: + project_state['name'] = 'testproj' + if 'manager_user' not in project_state: + project_state['manager_user'] = 'test1' + self.user = manager.create_user(**user_state) + self.project = manager.create_project(**project_state) + + def __enter__(self): + return (self.user, self.project) + + def __exit__(self, value, type, trace): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + + +class AuthManagerTestCase(object): + def setUp(self): + FLAGS.auth_driver = self.auth_driver + super(AuthManagerTestCase, self).setUp() + self.flags(connection_type='fake') + self.manager = manager.AuthManager(new=True) + + def test_create_and_find_user(self): + with user_generator(self.manager): + self.assert_(self.manager.get_user('test1')) + + def test_create_and_find_with_properties(self): + with user_generator(self.manager, name="herbert", secret="classified", + access="private-party"): + u = self.manager.get_user('herbert') + self.assertEqual('herbert', u.id) + self.assertEqual('herbert', u.name) + self.assertEqual('classified', u.secret) + self.assertEqual('private-party', u.access) + + def test_004_signature_is_valid(self): + #self.assertTrue(self.manager.authenticate(**boto.generate_url ...? 
)) + pass + #raise NotImplementedError + + def test_005_can_get_credentials(self): + return + credentials = self.manager.get_user('test1').get_credentials() + self.assertEqual(credentials, + 'export EC2_ACCESS_KEY="access"\n' + + 'export EC2_SECRET_KEY="secret"\n' + + 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + + 'export S3_URL="http://127.0.0.1:3333/"\n' + + 'export EC2_USER_ID="test1"\n') + + def test_can_list_users(self): + with user_generator(self.manager): + with user_generator(self.manager, name="test2"): + users = self.manager.get_users() + self.assert_(filter(lambda u: u.id == 'test1', users)) + self.assert_(filter(lambda u: u.id == 'test2', users)) + self.assert_(not filter(lambda u: u.id == 'test3', users)) + + def test_can_add_and_remove_user_role(self): + with user_generator(self.manager): + self.assertFalse(self.manager.has_role('test1', 'itsec')) + self.manager.add_role('test1', 'itsec') + self.assertTrue(self.manager.has_role('test1', 'itsec')) + self.manager.remove_role('test1', 'itsec') + self.assertFalse(self.manager.has_role('test1', 'itsec')) + + def test_can_create_and_get_project(self): + with user_and_project_generator(self.manager) as (u, p): + self.assert_(self.manager.get_user('test1')) + self.assert_(self.manager.get_user('test1')) + self.assert_(self.manager.get_project('testproj')) + + def test_can_list_projects(self): + with user_and_project_generator(self.manager): + with project_generator(self.manager, name="testproj2"): + projects = self.manager.get_projects() + self.assert_(filter(lambda p: p.name == 'testproj', projects)) + self.assert_(filter(lambda p: p.name == 'testproj2', projects)) + self.assert_(not filter(lambda p: p.name == 'testproj3', + projects)) + + def test_can_create_and_get_project_with_attributes(self): + with user_generator(self.manager): + with project_generator(self.manager, description='A test project'): + project = self.manager.get_project('testproj') + self.assertEqual('A test project', project.description) + + def test_can_create_project_with_manager(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertEqual('test1', project.project_manager_id) + self.assertTrue(self.manager.is_project_manager(user, project)) + + def test_create_project_assigns_manager_to_members(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertTrue(self.manager.is_project_member(user, project)) + + def test_no_extra_project_members(self): + with user_generator(self.manager, name='test2') as baduser: + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.is_project_member(baduser, + project)) + + def test_no_extra_project_managers(self): + with user_generator(self.manager, name='test2') as baduser: + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.is_project_manager(baduser, + project)) + + def test_can_add_user_to_project(self): + with user_generator(self.manager, name='test2') as user: + with user_and_project_generator(self.manager) as (_user, project): + self.manager.add_to_project(user, project) + project = self.manager.get_project('testproj') + self.assertTrue(self.manager.is_project_member(user, project)) + + def test_can_remove_user_from_project(self): + with user_generator(self.manager, name='test2') as user: + with user_and_project_generator(self.manager) as (_user, project): + self.manager.add_to_project(user, project) + project = self.manager.get_project('testproj') + 
self.assertTrue(self.manager.is_project_member(user, project)) + self.manager.remove_from_project(user, project) + project = self.manager.get_project('testproj') + self.assertFalse(self.manager.is_project_member(user, project)) + + def test_can_add_remove_user_with_role(self): + with user_generator(self.manager, name='test2') as user: + with user_and_project_generator(self.manager) as (_user, project): + # NOTE(todd): after modifying users you must reload project + self.manager.add_to_project(user, project) + project = self.manager.get_project('testproj') + self.manager.add_role(user, 'developer', project) + self.assertTrue(self.manager.is_project_member(user, project)) + self.manager.remove_from_project(user, project) + project = self.manager.get_project('testproj') + self.assertFalse(self.manager.has_role(user, 'developer', + project)) + self.assertFalse(self.manager.is_project_member(user, project)) + + def test_can_generate_x509(self): + # NOTE(todd): this doesn't assert against the auth manager + # so it probably belongs in crypto_unittest + # but I'm leaving it where I found it. + with user_and_project_generator(self.manager) as (user, project): + # NOTE(todd): Should mention why we must setup controller first + # (somebody please clue me in) + cloud_controller = cloud.CloudController() + cloud_controller.setup() + _key, cert_str = self.manager._generate_x509_cert('test1', + 'testproj') + logging.debug(cert_str) + + # Need to verify that it's signed by the right intermediate CA + full_chain = crypto.fetch_ca(project_id='testproj', chain=True) + int_cert = crypto.fetch_ca(project_id='testproj', chain=False) + cloud_cert = crypto.fetch_ca() + logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) + signed_cert = X509.load_cert_string(cert_str) + chain_cert = X509.load_cert_string(full_chain) + int_cert = X509.load_cert_string(int_cert) + cloud_cert = X509.load_cert_string(cloud_cert) + self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) + self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) + if not FLAGS.use_intermediate_ca: + self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) + else: + self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) + + def test_adding_role_to_project_is_ignored_unless_added_to_user(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.manager.add_role(user, 'sysadmin', project) + # NOTE(todd): it will still show up in get_user_roles(u, project) + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.manager.add_role(user, 'sysadmin') + self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) + + def test_add_user_role_doesnt_infect_project_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.manager.add_role(user, 'sysadmin') + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + + def test_can_list_user_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + roles = self.manager.get_user_roles(user) + self.assertTrue('sysadmin' in roles) + self.assertFalse('netadmin' in roles) + + def test_can_list_project_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.manager.add_role(user, 'sysadmin', project) + self.manager.add_role(user, 
'netadmin', project) + project_roles = self.manager.get_user_roles(user, project) + self.assertTrue('sysadmin' in project_roles) + self.assertTrue('netadmin' in project_roles) + # has role should be false user-level role is missing + self.assertFalse(self.manager.has_role(user, 'netadmin', project)) + + def test_can_remove_user_roles(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.assertTrue(self.manager.has_role(user, 'sysadmin')) + self.manager.remove_role(user, 'sysadmin') + self.assertFalse(self.manager.has_role(user, 'sysadmin')) + + def test_removing_user_role_hides_it_from_project(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.manager.add_role(user, 'sysadmin', project) + self.assertTrue(self.manager.has_role(user, 'sysadmin', project)) + self.manager.remove_role(user, 'sysadmin') + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + + def test_can_remove_project_role_but_keep_user_role(self): + with user_and_project_generator(self.manager) as (user, project): + self.manager.add_role(user, 'sysadmin') + self.manager.add_role(user, 'sysadmin', project) + self.assertTrue(self.manager.has_role(user, 'sysadmin')) + self.manager.remove_role(user, 'sysadmin', project) + self.assertFalse(self.manager.has_role(user, 'sysadmin', project)) + self.assertTrue(self.manager.has_role(user, 'sysadmin')) + + def test_can_retrieve_project_by_user(self): + with user_and_project_generator(self.manager) as (user, project): + self.assertEqual(1, len(self.manager.get_projects('test1'))) + + def test_can_modify_project(self): + with user_and_project_generator(self.manager): + with user_generator(self.manager, name='test2'): + self.manager.modify_project('testproj', 'test2', 'new desc') + project = self.manager.get_project('testproj') + self.assertEqual('test2', project.project_manager_id) + self.assertEqual('new desc', project.description) + + def test_can_delete_project(self): + with user_generator(self.manager): + self.manager.create_project('testproj', 'test1') + self.assert_(self.manager.get_project('testproj')) + self.manager.delete_project('testproj') + projectlist = self.manager.get_projects() + self.assert_(not filter(lambda p: p.name == 'testproj', + projectlist)) + + def test_can_delete_user(self): + self.manager.create_user('test1') + self.assert_(self.manager.get_user('test1')) + self.manager.delete_user('test1') + userlist = self.manager.get_users() + self.assert_(not filter(lambda u: u.id == 'test1', userlist)) + + def test_can_modify_users(self): + with user_generator(self.manager): + self.manager.modify_user('test1', 'access', 'secret', True) + user = self.manager.get_user('test1') + self.assertEqual('access', user.access) + self.assertEqual('secret', user.secret) + self.assertTrue(user.is_admin()) + + +class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): + auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' + + def __init__(self, *args, **kwargs): + AuthManagerTestCase.__init__(self) + test.TestCase.__init__(self, *args, **kwargs) + import nova.auth.fakeldap as fakeldap + FLAGS.redis_db = 8 + if FLAGS.flush_db: + logging.info("Flushing redis datastore") + try: + r = fakeldap.Redis.instance() + r.flushdb() + except: + self.skip = True + + +class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase): + auth_driver = 'nova.auth.dbdriver.DbDriver' + + +if __name__ == "__main__": + # TODO: Implement use_fake 
as an option + unittest.main() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py new file mode 100644 index 000000000..53a762310 --- /dev/null +++ b/nova/tests/test_cloud.py @@ -0,0 +1,332 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from base64 import b64decode +import json +import logging +from M2Crypto import BIO +from M2Crypto import RSA +import os +import StringIO +import tempfile +import time + +from eventlet import greenthread +from xml.etree import ElementTree + +from nova import context +from nova import crypto +from nova import db +from nova import flags +from nova import rpc +from nova import test +from nova import utils +from nova.auth import manager +from nova.compute import power_state +from nova.api.ec2 import cloud +from nova.objectstore import image + + +FLAGS = flags.FLAGS + +# Temp dirs for working with image attributes through the cloud controller +# (stole this from objectstore_unittest.py) +OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-') +IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images') +os.makedirs(IMAGES_PATH) + + +class CloudTestCase(test.TestCase): + def setUp(self): + super(CloudTestCase, self).setUp() + self.flags(connection_type='fake', images_path=IMAGES_PATH) + + self.conn = rpc.Connection.instance() + logging.getLogger().setLevel(logging.DEBUG) + + # set up our cloud + self.cloud = cloud.CloudController() + + # set up a service + self.compute = utils.import_object(FLAGS.compute_manager) + self.compute_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.compute_topic, + proxy=self.compute) + self.compute_consumer.attach_to_eventlet() + self.network = utils.import_object(FLAGS.network_manager) + self.network_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.network_topic, + proxy=self.network) + self.network_consumer.attach_to_eventlet() + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('admin', 'admin', 'admin', True) + self.project = self.manager.create_project('proj', 'admin', 'proj') + self.context = context.RequestContext(user=self.user, + project=self.project) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(CloudTestCase, self).tearDown() + + def _create_key(self, name): + # NOTE(vish): create depends on pool, so just call helper directly + return cloud._gen_key(self.context, self.context.user.id, name) + + def test_describe_addresses(self): + """Makes sure describe addresses runs without raising an exception""" + address = "10.10.10.10" + db.floating_ip_create(self.context, + {'address': address, + 'host': FLAGS.host}) + self.cloud.allocate_address(self.context) + self.cloud.describe_addresses(self.context) + self.cloud.release_address(self.context, + public_ip=address) + greenthread.sleep(0.3) + 
db.floating_ip_destroy(self.context, address) + + def test_associate_disassociate_address(self): + """Verifies associate runs cleanly without raising an exception""" + address = "10.10.10.10" + db.floating_ip_create(self.context, + {'address': address, + 'host': FLAGS.host}) + self.cloud.allocate_address(self.context) + inst = db.instance_create(self.context, {}) + fixed = self.network.allocate_fixed_ip(self.context, inst['id']) + ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + self.cloud.associate_address(self.context, + instance_id=ec2_id, + public_ip=address) + self.cloud.disassociate_address(self.context, + public_ip=address) + self.cloud.release_address(self.context, + public_ip=address) + greenthread.sleep(0.3) + self.network.deallocate_fixed_ip(self.context, fixed) + db.instance_destroy(self.context, inst['id']) + db.floating_ip_destroy(self.context, address) + + def test_describe_volumes(self): + """Makes sure describe_volumes works and filters results.""" + vol1 = db.volume_create(self.context, {}) + vol2 = db.volume_create(self.context, {}) + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + result = self.cloud.describe_volumes(self.context, + volume_id=[vol2['ec2_id']]) + self.assertEqual(len(result['volumeSet']), 1) + self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id']) + db.volume_destroy(self.context, vol1['id']) + db.volume_destroy(self.context, vol2['id']) + + def test_console_output(self): + image_id = FLAGS.default_image + instance_type = FLAGS.default_instance_type + max_count = 1 + kwargs = {'image_id': image_id, + 'instance_type': instance_type, + 'max_count': max_count} + rv = yield self.cloud.run_instances(self.context, **kwargs) + instance_id = rv['instancesSet'][0]['instanceId'] + output = yield self.cloud.get_console_output(context=self.context, + instance_id=[instance_id]) + self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') + # TODO(soren): We need this until we can stop polling in the rpc code + # for unit tests. 
+ greenthread.sleep(0.3) + rv = yield self.cloud.terminate_instances(self.context, [instance_id]) + + def test_key_generation(self): + result = self._create_key('test') + private_key = result['private_key'] + key = RSA.load_key_string(private_key, callback=lambda: None) + bio = BIO.MemoryBuffer() + public_key = db.key_pair_get(self.context, + self.context.user.id, + 'test')['public_key'] + key.save_pub_key_bio(bio) + converted = crypto.ssl_pub_to_ssh_pub(bio.read()) + # assert key fields are equal + self.assertEqual(public_key.split(" ")[1].strip(), + converted.split(" ")[1].strip()) + + def test_describe_key_pairs(self): + self._create_key('test1') + self._create_key('test2') + result = self.cloud.describe_key_pairs(self.context) + keys = result["keypairsSet"] + self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) + self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) + + def test_delete_key_pair(self): + self._create_key('test') + self.cloud.delete_key_pair(self.context, 'test') + + def test_run_instances(self): + if FLAGS.connection_type == 'fake': + logging.debug("Can't test instances without a real virtual env.") + return + image_id = FLAGS.default_image + instance_type = FLAGS.default_instance_type + max_count = 1 + kwargs = {'image_id': image_id, + 'instance_type': instance_type, + 'max_count': max_count} + rv = yield self.cloud.run_instances(self.context, **kwargs) + # TODO: check for proper response + instance_id = rv['reservationSet'][0].keys()[0] + instance = rv['reservationSet'][0][instance_id][0] + logging.debug("Need to watch instance %s until it's running..." % + instance['instance_id']) + while True: + greenthread.sleep(1) + info = self.cloud._get_instance(instance['instance_id']) + logging.debug(info['state']) + if info['state'] == power_state.RUNNING: + break + self.assert_(rv) + + if connection_type != 'fake': + time.sleep(45) # Should use boto for polling here + for reservations in rv['reservationSet']: + # for res_id in reservations.keys(): + # logging.debug(reservations[res_id]) + # for instance in reservations[res_id]: + for instance in reservations[reservations.keys()[0]]: + instance_id = instance['instance_id'] + logging.debug("Terminating instance %s" % instance_id) + rv = yield self.compute.terminate_instance(instance_id) + + def test_instance_update_state(self): + def instance(num): + return { + 'reservation_id': 'r-1', + 'instance_id': 'i-%s' % num, + 'image_id': 'ami-%s' % num, + 'private_dns_name': '10.0.0.%s' % num, + 'dns_name': '10.0.0%s' % num, + 'ami_launch_index': str(num), + 'instance_type': 'fake', + 'availability_zone': 'fake', + 'key_name': None, + 'kernel_id': 'fake', + 'ramdisk_id': 'fake', + 'groups': ['default'], + 'product_codes': None, + 'state': 0x01, + 'user_data': ''} + rv = self.cloud._format_describe_instances(self.context) + self.assert_(len(rv['reservationSet']) == 0) + + # simulate launch of 5 instances + # self.cloud.instances['pending'] = {} + #for i in xrange(5): + # inst = instance(i) + # self.cloud.instances['pending'][inst['instance_id']] = inst + + #rv = self.cloud._format_instances(self.admin) + #self.assert_(len(rv['reservationSet']) == 1) + #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) + # report 4 nodes each having 1 of the instances + #for i in xrange(4): + # self.cloud.update_state('instances', + # {('node-%s' % i): {('i-%s' % i): + # instance(i)}}) + + # one instance should be pending still + #self.assert_(len(self.cloud.instances['pending'].keys()) == 1) + + # check that the 
reservations collapse + #rv = self.cloud._format_instances(self.admin) + #self.assert_(len(rv['reservationSet']) == 1) + #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) + + # check that we can get metadata for each instance + #for i in xrange(4): + # data = self.cloud.get_metadata(instance(i)['private_dns_name']) + # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i) + + @staticmethod + def _fake_set_image_description(ctxt, image_id, description): + from nova.objectstore import handler + + class req: + pass + + request = req() + request.context = ctxt + request.args = {'image_id': [image_id], + 'description': [description]} + + resource = handler.ImagesResource() + resource.render_POST(request) + + def test_user_editable_image_endpoint(self): + pathdir = os.path.join(FLAGS.images_path, 'ami-testing') + os.mkdir(pathdir) + info = {'isPublic': False} + with open(os.path.join(pathdir, 'info.json'), 'w') as f: + json.dump(info, f) + img = image.Image('ami-testing') + # self.cloud.set_image_description(self.context, 'ami-testing', + # 'Foo Img') + # NOTE(vish): Above won't work unless we start objectstore or create + # a fake version of api/ec2/images.py conn that can + # call methods directly instead of going through boto. + # for now, just cheat and call the method directly + self._fake_set_image_description(self.context, 'ami-testing', + 'Foo Img') + self.assertEqual('Foo Img', img.metadata['description']) + self._fake_set_image_description(self.context, 'ami-testing', '') + self.assertEqual('', img.metadata['description']) + + def test_update_of_instance_display_fields(self): + inst = db.instance_create(self.context, {}) + ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id']) + self.cloud.update_instance(self.context, ec2_id, + display_name='c00l 1m4g3') + inst = db.instance_get(self.context, inst['id']) + self.assertEqual('c00l 1m4g3', inst['display_name']) + db.instance_destroy(self.context, inst['id']) + + def test_update_of_instance_wont_update_private_fields(self): + inst = db.instance_create(self.context, {}) + self.cloud.update_instance(self.context, inst['id'], + mac_address='DE:AD:BE:EF') + inst = db.instance_get(self.context, inst['id']) + self.assertEqual(None, inst['mac_address']) + db.instance_destroy(self.context, inst['id']) + + def test_update_of_volume_display_fields(self): + vol = db.volume_create(self.context, {}) + self.cloud.update_volume(self.context, vol['id'], + display_name='c00l v0lum3') + vol = db.volume_get(self.context, vol['id']) + self.assertEqual('c00l v0lum3', vol['display_name']) + db.volume_destroy(self.context, vol['id']) + + def test_update_of_volume_wont_update_private_fields(self): + vol = db.volume_create(self.context, {}) + self.cloud.update_volume(self.context, vol['id'], + mountpoint='/not/here') + vol = db.volume_get(self.context, vol['id']) + self.assertEqual(None, vol['mountpoint']) + db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py new file mode 100644 index 000000000..c6353d357 --- /dev/null +++ b/nova/tests/test_compute.py @@ -0,0 +1,155 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Compute +""" + +import datetime +import logging + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.auth import manager +from nova.compute import api as compute_api + + +FLAGS = flags.FLAGS + + +class ComputeTestCase(test.TestCase): + """Test case for compute""" + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(ComputeTestCase, self).setUp() + self.flags(connection_type='fake', + network_manager='nova.network.manager.FlatManager') + self.compute = utils.import_object(FLAGS.compute_manager) + self.compute_api = compute_api.ComputeAPI() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + super(ComputeTestCase, self).tearDown() + + def _create_instance(self): + """Create a test instance""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + return db.instance_create(self.context, inst)['id'] + + def test_create_instance_defaults_display_name(self): + """Verify that an instance cannot be created without a display_name.""" + cases = [dict(), dict(display_name=None)] + for instance in cases: + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, **instance) + try: + self.assertNotEqual(ref[0].display_name, None) + finally: + db.instance_destroy(self.context, ref[0]['id']) + + def test_create_instance_associates_security_groups(self): + """Make sure create_instances associates security groups""" + values = {'name': 'default', + 'description': 'default', + 'user_id': self.user.id, + 'project_id': self.project.id} + group = db.security_group_create(self.context, values) + ref = self.compute_api.create_instances(self.context, + FLAGS.default_instance_type, None, security_group=['default']) + try: + self.assertEqual(len(ref[0]['security_groups']), 1) + finally: + db.security_group_destroy(self.context, group['id']) + db.instance_destroy(self.context, ref[0]['id']) + + def test_run_terminate(self): + """Make sure it is possible to run and terminate instance""" + instance_id = self._create_instance() + + self.compute.run_instance(self.context, instance_id) + + instances = db.instance_get_all(context.get_admin_context()) + logging.info("Running instances: %s", instances) + self.assertEqual(len(instances), 1) + + self.compute.terminate_instance(self.context, instance_id) + + instances = db.instance_get_all(context.get_admin_context()) + logging.info("After terminating instances: %s", instances) + self.assertEqual(len(instances), 0) + + def test_run_terminate_timestamps(self): + """Make sure timestamps are 
set for launched and destroyed""" + instance_id = self._create_instance() + instance_ref = db.instance_get(self.context, instance_id) + self.assertEqual(instance_ref['launched_at'], None) + self.assertEqual(instance_ref['deleted_at'], None) + launch = datetime.datetime.utcnow() + self.compute.run_instance(self.context, instance_id) + instance_ref = db.instance_get(self.context, instance_id) + self.assert_(instance_ref['launched_at'] > launch) + self.assertEqual(instance_ref['deleted_at'], None) + terminate = datetime.datetime.utcnow() + self.compute.terminate_instance(self.context, instance_id) + self.context = self.context.elevated(True) + instance_ref = db.instance_get(self.context, instance_id) + self.assert_(instance_ref['launched_at'] < terminate) + self.assert_(instance_ref['deleted_at'] > terminate) + + def test_reboot(self): + """Ensure instance can be rebooted""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.reboot_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + + def test_console_output(self): + """Make sure we can get console output from instance""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + + console = self.compute.get_console_output(self.context, + instance_id) + self.assert_(console) + self.compute.terminate_instance(self.context, instance_id) + + def test_run_instance_existing(self): + """Ensure failure when running an instance that already exists""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.assertRaises(exception.Error, + self.compute.run_instance, + self.context, + instance_id) + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py new file mode 100644 index 000000000..707300fcf --- /dev/null +++ b/nova/tests/test_flags.py @@ -0,0 +1,102 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
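The flag tests that follow rely on defining flags against an isolated FlagValues object and then parsing a fake argv. A minimal sketch of that pattern, using the same helpers the tests below use but a made-up flag name ('example_flag'):

from nova import flags

# An isolated flag namespace, so parsing here cannot leak into the global FLAGS.
my_flags = flags.FlagValues()
flags.DEFINE_string('example_flag', 'default', 'for illustration only',
                    flag_values=my_flags)
# argv[0] is the program name, as in the tests below.
my_flags(['example_prog', '--example_flag', 'overridden'])
assert my_flags.example_flag == 'overridden'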
+ +from nova import exception +from nova import flags +from nova import test + +FLAGS = flags.FLAGS +flags.DEFINE_string('flags_unittest', 'foo', 'for testing purposes only') + + +class FlagsTestCase(test.TestCase): + + def setUp(self): + super(FlagsTestCase, self).setUp() + self.FLAGS = flags.FlagValues() + self.global_FLAGS = flags.FLAGS + + def test_define(self): + self.assert_('string' not in self.FLAGS) + self.assert_('int' not in self.FLAGS) + self.assert_('false' not in self.FLAGS) + self.assert_('true' not in self.FLAGS) + + flags.DEFINE_string('string', 'default', 'desc', + flag_values=self.FLAGS) + flags.DEFINE_integer('int', 1, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('false', False, 'desc', flag_values=self.FLAGS) + flags.DEFINE_bool('true', True, 'desc', flag_values=self.FLAGS) + + self.assert_(self.FLAGS['string']) + self.assert_(self.FLAGS['int']) + self.assert_(self.FLAGS['false']) + self.assert_(self.FLAGS['true']) + self.assertEqual(self.FLAGS.string, 'default') + self.assertEqual(self.FLAGS.int, 1) + self.assertEqual(self.FLAGS.false, False) + self.assertEqual(self.FLAGS.true, True) + + argv = ['flags_test', + '--string', 'foo', + '--int', '2', + '--false', + '--notrue'] + + self.FLAGS(argv) + self.assertEqual(self.FLAGS.string, 'foo') + self.assertEqual(self.FLAGS.int, 2) + self.assertEqual(self.FLAGS.false, True) + self.assertEqual(self.FLAGS.true, False) + + def test_declare(self): + self.assert_('answer' not in self.global_FLAGS) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assert_('answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.answer, 42) + + # Make sure we don't overwrite anything + self.global_FLAGS.answer = 256 + self.assertEqual(self.global_FLAGS.answer, 256) + flags.DECLARE('answer', 'nova.tests.declare_flags') + self.assertEqual(self.global_FLAGS.answer, 256) + + def test_runtime_and_unknown_flags(self): + self.assert_('runtime_answer' not in self.global_FLAGS) + + argv = ['flags_test', '--runtime_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + self.assertEqual(len(args), 2) + self.assertEqual(args[1], 'extra_arg') + + self.assert_('runtime_answer' not in self.global_FLAGS) + + import nova.tests.runtime_flags + + self.assert_('runtime_answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.runtime_answer, 60) + + def test_flag_leak_left(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + FLAGS.flags_unittest = 'bar' + self.assertEqual(FLAGS.flags_unittest, 'bar') + + def test_flag_leak_right(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + FLAGS.flags_unittest = 'bar' + self.assertEqual(FLAGS.flags_unittest, 'bar') diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py new file mode 100644 index 000000000..33c1777d5 --- /dev/null +++ b/nova/tests/test_misc.py @@ -0,0 +1,55 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
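The Authors check below walks bzr revision history and normalizes contributor emails through a .mailmap before comparing them against the Authors file. A simplified, self-contained sketch of that comparison, assuming the mailmap is a plain alias-to-canonical dict (the real code uses parse_mailmap and str_dict_replace from nova.utils):

def missing_authors(contributor_emails, authors_text, mailmap):
    """Return contributor emails that are absent from the Authors text."""
    missing = set()
    for email in contributor_emails:
        # Normalize aliases the way a .mailmap entry is meant to.
        email = mailmap.get(email, email)
        if email not in authors_text:
            missing.add(email)
    return missing

# An aliased address that maps to a listed one is not reported as missing.
assert missing_authors(['<old@example.com>'],
                       '<new@example.com>\n',
                       {'<old@example.com>': '<new@example.com>'}) == set()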
+ +import os + +from nova import test +from nova.utils import parse_mailmap, str_dict_replace + + +class ProjectTestCase(test.TestCase): + def test_authors_up_to_date(self): + if os.path.exists('.bzr'): + contributors = set() + + mailmap = parse_mailmap('.mailmap') + + import bzrlib.workingtree + tree = bzrlib.workingtree.WorkingTree.open('.') + tree.lock_read() + try: + parents = tree.get_parent_ids() + g = tree.branch.repository.get_graph() + for p in parents[1:]: + rev_ids = [r for r, _ in g.iter_ancestry(parents) + if r != "null:"] + revs = tree.branch.repository.get_revisions(rev_ids) + for r in revs: + for author in r.get_apparent_authors(): + email = author.split(' ')[-1] + contributors.add(str_dict_replace(email, mailmap)) + + authors_file = open('Authors', 'r').read() + + missing = set() + for contributor in contributors: + if not contributor in authors_file: + missing.add(contributor) + + self.assertTrue(len(missing) == 0, + '%r not listed in Authors' % missing) + finally: + tree.unlock() diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py new file mode 100644 index 000000000..8cf2a5e54 --- /dev/null +++ b/nova/tests/test_quota.py @@ -0,0 +1,153 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
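test_quota_overrides below expects the allowed instance count to move from 2 to 4 to 10 as the project's quota rows change. Roughly, that is a min() over the instance quota and the core quota divided by cores per instance (assuming m1.small counts as a single core); the real quota.allowed_instances also caps at the requested count and accounts for resources already in use, which this sketch ignores:

def allowed_instances(instance_quota, core_quota, cores_per_instance):
    # Whichever quota runs out first is the limit.
    return min(instance_quota, core_quota // cores_per_instance)

assert allowed_instances(2, 4, 1) == 2      # default instance quota binds
assert allowed_instances(10, 4, 1) == 4     # core quota binds after override
assert allowed_instances(10, 100, 1) == 10  # instance quota binds again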
+ +import logging + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import test +from nova import utils +from nova.auth import manager +from nova.api.ec2 import cloud + + +FLAGS = flags.FLAGS + + +class QuotaTestCase(test.TestCase): + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(QuotaTestCase, self).setUp() + self.flags(connection_type='fake', + quota_instances=2, + quota_cores=4, + quota_volumes=2, + quota_gigabytes=20, + quota_floating_ips=1) + + self.cloud = cloud.CloudController() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('admin', 'admin', 'admin', True) + self.project = self.manager.create_project('admin', 'admin', 'admin') + self.network = utils.import_object(FLAGS.network_manager) + self.context = context.RequestContext(project=self.project, + user=self.user) + + def tearDown(self): + manager.AuthManager().delete_project(self.project) + manager.AuthManager().delete_user(self.user) + super(QuotaTestCase, self).tearDown() + + def _create_instance(self, cores=2): + """Create a test instance""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type'] = 'm1.large' + inst['vcpus'] = cores + inst['mac_address'] = utils.generate_mac() + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self, size=10): + """Create a test volume""" + vol = {} + vol['user_id'] = self.user.id + vol['project_id'] = self.project.id + vol['size'] = size + return db.volume_create(self.context, vol)['id'] + + def test_quota_overrides(self): + """Make sure overriding a projects quotas works""" + num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + self.assertEqual(num_instances, 2) + db.quota_create(self.context, {'project_id': self.project.id, + 'instances': 10}) + num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + self.assertEqual(num_instances, 4) + db.quota_update(self.context, self.project.id, {'cores': 100}) + num_instances = quota.allowed_instances(self.context, 100, 'm1.small') + self.assertEqual(num_instances, 10) + db.quota_destroy(self.context, self.project.id) + + def test_too_many_instances(self): + instance_ids = [] + for i in range(FLAGS.quota_instances): + instance_id = self._create_instance() + instance_ids.append(instance_id) + self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.context, + min_count=1, + max_count=1, + instance_type='m1.small', + image_id='fake') + for instance_id in instance_ids: + db.instance_destroy(self.context, instance_id) + + def test_too_many_cores(self): + instance_ids = [] + instance_id = self._create_instance(cores=4) + instance_ids.append(instance_id) + self.assertRaises(quota.QuotaError, self.cloud.run_instances, + self.context, + min_count=1, + max_count=1, + instance_type='m1.small', + image_id='fake') + for instance_id in instance_ids: + db.instance_destroy(self.context, instance_id) + + def test_too_many_volumes(self): + volume_ids = [] + for i in range(FLAGS.quota_volumes): + volume_id = self._create_volume() + volume_ids.append(volume_id) + self.assertRaises(quota.QuotaError, self.cloud.create_volume, + self.context, + size=10) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) + + def test_too_many_gigabytes(self): + volume_ids = [] + volume_id = self._create_volume(size=20) + 
volume_ids.append(volume_id) + self.assertRaises(quota.QuotaError, + self.cloud.create_volume, + self.context, + size=10) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) + + def test_too_many_addresses(self): + address = '192.168.0.100' + db.floating_ip_create(context.get_admin_context(), + {'address': address, 'host': FLAGS.host}) + float_addr = self.network.allocate_floating_ip(self.context, + self.project.id) + # NOTE(vish): This assert never fails. When cloud attempts to + # make an rpc.call, the test just finishes with OK. It + # appears to be something in the magic inline callbacks + # that is breaking. + self.assertRaises(quota.QuotaError, self.cloud.allocate_address, + self.context) + db.floating_ip_destroy(context.get_admin_context(), address) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py new file mode 100644 index 000000000..a2495e65a --- /dev/null +++ b/nova/tests/test_rpc.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using queue +""" +import logging + +from nova import context +from nova import flags +from nova import rpc +from nova import test + + +FLAGS = flags.FLAGS + + +class RpcTestCase(test.TestCase): + """Test cases for rpc""" + def setUp(self): + super(RpcTestCase, self).setUp() + self.conn = rpc.Connection.instance() + self.receiver = TestReceiver() + self.consumer = rpc.AdapterConsumer(connection=self.conn, + topic='test', + proxy=self.receiver) + self.consumer.attach_to_eventlet() + self.context = context.get_admin_context() + + def test_call_succeed(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. 
+ """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call + + Uses static methods because we aren't actually storing any state""" + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in""" + logging.debug("Received %s", value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context""" + logging.debug("Received %s", context) + return context.to_dict() + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in""" + raise Exception(value) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py new file mode 100644 index 000000000..c56f69698 --- /dev/null +++ b/nova/tests/test_scheduler.py @@ -0,0 +1,246 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
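The SimpleScheduler cases below expect instances and volumes to land on the least loaded host, and expect NoValidHost once every host is at max_cores or max_gigabytes. A toy model of that selection, with made-up data shapes (the real driver reads per-host usage from the database rather than a list):

def pick_least_busy(hosts, requested, maximum):
    """hosts is a list of (name, amount_in_use) pairs."""
    candidates = [(used, name) for (name, used) in hosts
                  if used + requested <= maximum]
    if not candidates:
        raise Exception('no valid host: every host would exceed the maximum')
    # Sorting by usage picks the least busy host first.
    return min(candidates)[1]

assert pick_least_busy([('host1', 1), ('host2', 0)], 1, 4) == 'host2'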
+""" +Tests For Scheduler +""" + +from nova import context +from nova import db +from nova import flags +from nova import service +from nova import test +from nova import rpc +from nova import utils +from nova.auth import manager as auth_manager +from nova.scheduler import manager +from nova.scheduler import driver + + +FLAGS = flags.FLAGS +flags.DECLARE('max_cores', 'nova.scheduler.simple') + + +class TestDriver(driver.Scheduler): + """Scheduler Driver for Tests""" + def schedule(context, topic, *args, **kwargs): + return 'fallback_host' + + def schedule_named_method(context, topic, num): + return 'named_host' + + +class SchedulerTestCase(test.TestCase): + """Test case for scheduler""" + def setUp(self): + super(SchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + + def test_fallback(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.fallback_host', + {'method': 'noexist', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.noexist(ctxt, 'topic', num=7) + + def test_named_method(self): + scheduler = manager.SchedulerManager() + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.named_host', + {'method': 'named_method', + 'args': {'num': 7}}) + self.mox.ReplayAll() + scheduler.named_method(ctxt, 'topic', num=7) + + +class SimpleDriverTestCase(test.TestCase): + """Test case for simple driver""" + def setUp(self): + super(SimpleDriverTestCase, self).setUp() + self.flags(connection_type='fake', + max_cores=4, + max_gigabytes=4, + network_manager='nova.network.manager.FlatManager', + volume_driver='nova.volume.driver.FakeISCSIDriver', + scheduler_driver='nova.scheduler.simple.SimpleScheduler') + self.scheduler = manager.SchedulerManager() + self.manager = auth_manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + + def _create_instance(self): + """Create a test instance""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + inst['vcpus'] = 1 + return db.instance_create(self.context, inst)['id'] + + def _create_volume(self): + """Create a test volume""" + vol = {} + vol['image_id'] = 'ami-test' + vol['reservation_id'] = 'r-fakeres' + vol['size'] = 1 + return db.volume_create(self.context, vol)['id'] + + def test_hosts_are_up(self): + """Ensures driver can find the hosts that are up""" + # NOTE(vish): constructing service without create method + # because we are going to use it without queue + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + hosts = self.scheduler.driver.hosts_up(self.context, 'compute') + self.assertEqual(len(hosts), 2) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_instance(self): + """Ensures the host with less cores gets the next one""" + compute1 = 
service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_id1 = self._create_instance() + compute1.run_instance(self.context, instance_id1) + instance_id2 = self._create_instance() + host = self.scheduler.driver.schedule_run_instance(self.context, + instance_id2) + self.assertEqual(host, 'host2') + compute1.terminate_instance(self.context, instance_id1) + db.instance_destroy(self.context, instance_id2) + compute1.kill() + compute2.kill() + + def test_too_many_cores(self): + """Ensures we don't go over max cores""" + compute1 = service.Service('host1', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute1.start() + compute2 = service.Service('host2', + 'nova-compute', + 'compute', + FLAGS.compute_manager) + compute2.start() + instance_ids1 = [] + instance_ids2 = [] + for index in xrange(FLAGS.max_cores): + instance_id = self._create_instance() + compute1.run_instance(self.context, instance_id) + instance_ids1.append(instance_id) + instance_id = self._create_instance() + compute2.run_instance(self.context, instance_id) + instance_ids2.append(instance_id) + instance_id = self._create_instance() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_run_instance, + self.context, + instance_id) + for instance_id in instance_ids1: + compute1.terminate_instance(self.context, instance_id) + for instance_id in instance_ids2: + compute2.terminate_instance(self.context, instance_id) + compute1.kill() + compute2.kill() + + def test_least_busy_host_gets_volume(self): + """Ensures the host with less gigabytes gets the next one""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_id1 = self._create_volume() + volume1.create_volume(self.context, volume_id1) + volume_id2 = self._create_volume() + host = self.scheduler.driver.schedule_create_volume(self.context, + volume_id2) + self.assertEqual(host, 'host2') + volume1.delete_volume(self.context, volume_id1) + db.volume_destroy(self.context, volume_id2) + volume1.kill() + volume2.kill() + + def test_too_many_gigabytes(self): + """Ensures we don't go over max gigabytes""" + volume1 = service.Service('host1', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume1.start() + volume2 = service.Service('host2', + 'nova-volume', + 'volume', + FLAGS.volume_manager) + volume2.start() + volume_ids1 = [] + volume_ids2 = [] + for index in xrange(FLAGS.max_gigabytes): + volume_id = self._create_volume() + volume1.create_volume(self.context, volume_id) + volume_ids1.append(volume_id) + volume_id = self._create_volume() + volume2.create_volume(self.context, volume_id) + volume_ids2.append(volume_id) + volume_id = self._create_volume() + self.assertRaises(driver.NoValidHost, + self.scheduler.driver.schedule_create_volume, + self.context, + volume_id) + for volume_id in volume_ids1: + volume1.delete_volume(self.context, volume_id) + for volume_id in volume_ids2: + volume2.delete_volume(self.context, volume_id) + volume1.kill() + volume2.kill() diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py new file mode 100644 index 000000000..b30838ad7 --- /dev/null +++ b/nova/tests/test_service.py @@ -0,0 +1,227 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States 
Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit Tests for remote procedure calls using queue +""" + +import mox + +from nova import exception +from nova import flags +from nova import rpc +from nova import test +from nova import service +from nova import manager + +FLAGS = flags.FLAGS +flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager", + "Manager for testing") + + +class FakeManager(manager.Manager): + """Fake manager for tests""" + def test_method(self): + return 'manager' + + +class ExtendedService(service.Service): + def test_method(self): + return 'service' + + +class ServiceManagerTestCase(test.TestCase): + """Test cases for Services""" + + def test_attribute_error_for_no_manager(self): + serv = service.Service('test', + 'test', + 'test', + 'nova.tests.test_service.FakeManager') + self.assertRaises(AttributeError, getattr, serv, 'test_method') + + def test_message_gets_to_manager(self): + serv = service.Service('test', + 'test', + 'test', + 'nova.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'manager') + + def test_override_manager_method(self): + serv = ExtendedService('test', + 'test', + 'test', + 'nova.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'service') + + +class ServiceTestCase(test.TestCase): + """Test cases for Services""" + + def setUp(self): + super(ServiceTestCase, self).setUp() + self.mox.StubOutWithMock(service, 'db') + + def test_create(self): + host = 'foo' + binary = 'nova-fake' + topic = 'fake' + + # NOTE(vish): Create was moved out of mox replay to make sure that + # the looping calls are created in StartService. 
+ app = service.Service.create(host=host, binary=binary) + + self.mox.StubOutWithMock(rpc, + 'AdapterConsumer', + use_mock_anything=True) + rpc.AdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.AdapterConsumer) + + rpc.AdapterConsumer(connection=mox.IgnoreArg(), + topic='%s.%s' % (topic, host), + proxy=mox.IsA(service.Service)).AndReturn( + rpc.AdapterConsumer) + + rpc.AdapterConsumer.attach_to_eventlet() + rpc.AdapterConsumer.attach_to_eventlet() + + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + self.mox.ReplayAll() + + app.start() + app.stop() + self.assert_(app) + + # We're testing sort of weird behavior in how report_state decides + # whether it is disconnected, it looks for a variable on itself called + # 'model_disconnected' and report_state doesn't really do much so this + # these are mostly just for coverage + def test_report_state_no_service(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + service_ref['id']).AndReturn(service_ref) + service.db.service_update(mox.IgnoreArg(), service_ref['id'], + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'nova.tests.test_service.FakeManager') + serv.start() + serv.report_state() + + def test_report_state_newly_disconnected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndRaise(Exception()) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'nova.tests.test_service.FakeManager') + serv.start() + serv.report_state() + self.assert_(serv.model_disconnected) + + def test_report_state_newly_connected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + service_ref['id']).AndReturn(service_ref) + service.db.service_update(mox.IgnoreArg(), service_ref['id'], + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 
'nova.tests.test_service.FakeManager') + serv.start() + serv.model_disconnected = True + serv.report_state() + + self.assert_(not serv.model_disconnected) diff --git a/nova/tests/test_twistd.py b/nova/tests/test_twistd.py new file mode 100644 index 000000000..75007b9c8 --- /dev/null +++ b/nova/tests/test_twistd.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import StringIO +import sys + +from nova import twistd +from nova import exception +from nova import flags +from nova import test + + +FLAGS = flags.FLAGS + + +class TwistdTestCase(test.TrialTestCase): + def setUp(self): + super(TwistdTestCase, self).setUp() + self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions) + sys.stdout = StringIO.StringIO() + + def tearDown(self): + super(TwistdTestCase, self).tearDown() + sys.stdout = sys.__stdout__ + + def test_basic(self): + options = self.Options() + argv = options.parseOptions() + + def test_logfile(self): + options = self.Options() + argv = options.parseOptions(['--logfile=foo']) + self.assertEqual(FLAGS.logfile, 'foo') + + def test_help(self): + options = self.Options() + self.assertRaises(SystemExit, options.parseOptions, ['--help']) + self.assert_('pidfile' in sys.stdout.getvalue()) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py new file mode 100644 index 000000000..85e569858 --- /dev/null +++ b/nova/tests/test_virt.py @@ -0,0 +1,258 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
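The libvirt tests below assert on the generated domain XML by running small ElementTree lookups against it. The same technique applied to a hand-written snippet, to show what those lambda checks are doing:

from xml.etree.ElementTree import fromstring

doc = fromstring("<domain type='kvm'><os><type>hvm</type></os></domain>")
assert doc.find('.').get('type') == 'kvm'         # root element attribute
assert doc.find('./os/type').text == 'hvm'        # nested element text
assert doc.find('./devices/emulator') is None     # absent element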
+ +from xml.etree.ElementTree import fromstring as xml_to_tree +from xml.dom.minidom import parseString as xml_to_dom + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova import utils +from nova.api.ec2 import cloud +from nova.auth import manager +from nova.virt import libvirt_conn + +FLAGS = flags.FLAGS +flags.DECLARE('instances_path', 'nova.compute.manager') + + +class LibvirtConnTestCase(test.TestCase): + def setUp(self): + super(LibvirtConnTestCase, self).setUp() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.network = utils.import_object(FLAGS.network_manager) + FLAGS.instances_path = '' + + def test_get_uri_and_template(self): + ip = '10.11.12.13' + + instance = {'internal_id': 1, + 'memory_kb': '1024000', + 'basepath': '/some/path', + 'bridge_name': 'br100', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'bridge': 'br101', + 'instance_type': 'm1.small'} + + user_context = context.RequestContext(project=self.project, + user=self.user) + instance_ref = db.instance_create(user_context, instance) + network_ref = self.network.get_network(user_context) + self.network.set_network_host(context.get_admin_context(), + network_ref['id']) + + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + ctxt = context.get_admin_context() + fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) + db.fixed_ip_update(ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + + type_uri_map = {'qemu': ('qemu:///system', + [(lambda t: t.find('.').get('type'), 'qemu'), + (lambda t: t.find('./os/type').text, 'hvm'), + (lambda t: t.find('./devices/emulator'), None)]), + 'kvm': ('qemu:///system', + [(lambda t: t.find('.').get('type'), 'kvm'), + (lambda t: t.find('./os/type').text, 'hvm'), + (lambda t: t.find('./devices/emulator'), None)]), + 'uml': ('uml:///system', + [(lambda t: t.find('.').get('type'), 'uml'), + (lambda t: t.find('./os/type').text, 'uml')])} + + common_checks = [ + (lambda t: t.find('.').tag, 'domain'), + (lambda t: t.find('./devices/interface/filterref/parameter').\ + get('name'), 'IP'), + (lambda t: t.find('./devices/interface/filterref/parameter').\ + get('value'), '10.11.12.13')] + + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = libvirt_conn.LibvirtConnection(True) + + uri, _template, _rescue = conn.get_uri_and_templates() + self.assertEquals(uri, expected_uri) + + xml = conn.to_xml(instance_ref) + tree = xml_to_tree(xml) + for i, (check, expected_result) in enumerate(checks): + self.assertEqual(check(tree), + expected_result, + '%s failed check %d' % (xml, i)) + + for i, (check, expected_result) in enumerate(common_checks): + self.assertEqual(check(tree), + expected_result, + '%s failed common check %d' % (xml, i)) + + # Deliberately not just assigning this string to FLAGS.libvirt_uri and + # checking against that later on. This way we make sure the + # implementation doesn't fiddle around with the FLAGS. 
+ testuri = 'something completely different' + FLAGS.libvirt_uri = testuri + for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): + FLAGS.libvirt_type = libvirt_type + conn = libvirt_conn.LibvirtConnection(True) + uri, _template, _rescue = conn.get_uri_and_templates() + self.assertEquals(uri, testuri) + + def tearDown(self): + super(LibvirtConnTestCase, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + + +class NWFilterTestCase(test.TestCase): + + def setUp(self): + super(NWFilterTestCase, self).setUp() + + class Mock(object): + pass + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext(self.user, self.project) + + self.fake_libvirt_connection = Mock() + + self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection) + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + + def test_cidr_rule_nwfilter_xml(self): + cloud_controller = cloud.CloudController() + cloud_controller.create_security_group(self.context, + 'testgroup', + 'test group description') + cloud_controller.authorize_security_group_ingress(self.context, + 'testgroup', + from_port='80', + to_port='81', + ip_protocol='tcp', + cidr_ip='0.0.0.0/0') + + security_group = db.security_group_get_by_name(self.context, + 'fake', + 'testgroup') + + xml = self.fw.security_group_to_nwfilter_xml(security_group.id) + + dom = xml_to_dom(xml) + self.assertEqual(dom.firstChild.tagName, 'filter') + + rules = dom.getElementsByTagName('rule') + self.assertEqual(len(rules), 1) + + # It's supposed to allow inbound traffic. 
+ self.assertEqual(rules[0].getAttribute('action'), 'accept') + self.assertEqual(rules[0].getAttribute('direction'), 'in') + + # Must be lower priority than the base filter (which blocks everything) + self.assertTrue(int(rules[0].getAttribute('priority')) < 1000) + + ip_conditions = rules[0].getElementsByTagName('tcp') + self.assertEqual(len(ip_conditions), 1) + self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0') + self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0') + self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80') + self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81') + self.teardown_security_group() + + def teardown_security_group(self): + cloud_controller = cloud.CloudController() + cloud_controller.delete_security_group(self.context, 'testgroup') + + def setup_and_return_security_group(self): + cloud_controller = cloud.CloudController() + cloud_controller.create_security_group(self.context, + 'testgroup', + 'test group description') + cloud_controller.authorize_security_group_ingress(self.context, + 'testgroup', + from_port='80', + to_port='81', + ip_protocol='tcp', + cidr_ip='0.0.0.0/0') + + return db.security_group_get_by_name(self.context, 'fake', 'testgroup') + + def test_creates_base_rule_first(self): + # These come pre-defined by libvirt + self.defined_filters = ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'] + + self.recursive_depends = {} + for f in self.defined_filters: + self.recursive_depends[f] = [] + + def _filterDefineXMLMock(xml): + dom = xml_to_dom(xml) + name = dom.firstChild.getAttribute('name') + self.recursive_depends[name] = [] + for f in dom.getElementsByTagName('filterref'): + ref = f.getAttribute('filter') + self.assertTrue(ref in self.defined_filters, + ('%s referenced filter that does ' + + 'not yet exist: %s') % (name, ref)) + dependencies = [ref] + self.recursive_depends[ref] + self.recursive_depends[name] += dependencies + + self.defined_filters.append(name) + return True + + self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock + + instance_ref = db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake'}) + inst_id = instance_ref['id'] + + def _ensure_all_called(): + instance_filter = 'nova-instance-%s' % instance_ref['name'] + secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] + for required in [secgroup_filter, 'allow-dhcp-server', + 'no-arp-spoofing', 'no-ip-spoofing', + 'no-mac-spoofing']: + self.assertTrue(required in + self.recursive_depends[instance_filter], + "Instance's filter does not include %s" % + required) + + self.security_group = self.setup_and_return_security_group() + + db.instance_add_security_group(self.context, inst_id, + self.security_group.id) + instance = db.instance_get(self.context, inst_id) + + d = self.fw.setup_nwfilters_for_instance(instance) + _ensure_all_called() + self.teardown_security_group() + return d diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py new file mode 100644 index 000000000..b13455fb0 --- /dev/null +++ b/nova/tests/test_volume.py @@ -0,0 +1,175 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Volume Code. + +""" +import logging + +from nova import context +from nova import exception +from nova import db +from nova import flags +from nova import test +from nova import utils + +FLAGS = flags.FLAGS + + +class VolumeTestCase(test.TestCase): + """Test Case for volumes.""" + + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(VolumeTestCase, self).setUp() + self.compute = utils.import_object(FLAGS.compute_manager) + self.flags(connection_type='fake') + self.volume = utils.import_object(FLAGS.volume_manager) + self.context = context.get_admin_context() + + @staticmethod + def _create_volume(size='0'): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(context.get_admin_context(), vol)['id'] + + def test_create_delete_volume(self): + """Test volume can be created and deleted.""" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), + volume_id).id) + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.NotFound, + db.volume_get, + self.context, + volume_id) + + def test_too_big_volume(self): + """Ensure failure if a too large of a volume is requested.""" + # FIXME(vish): validation needs to move into the data layer in + # volume_create + return True + try: + volume_id = self._create_volume('1001') + self.volume.create_volume(self.context, volume_id) + self.fail("Should have thrown TypeError") + except TypeError: + pass + + def test_too_many_volumes(self): + """Ensure that NoMoreTargets is raised when we run out of volumes.""" + vols = [] + total_slots = FLAGS.iscsi_num_targets + for _index in xrange(total_slots): + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + vols.append(volume_id) + volume_id = self._create_volume() + self.assertRaises(db.NoMoreTargets, + self.volume.create_volume, + self.context, + volume_id) + db.volume_destroy(context.get_admin_context(), volume_id) + for volume_id in vols: + self.volume.delete_volume(self.context, volume_id) + + def test_run_attach_detach_volume(self): + """Make sure volume can be attached and detached from instance.""" + inst = {} + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['user_id'] = 'fake' + inst['project_id'] = 'fake' + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + instance_id = db.instance_create(self.context, inst)['id'] + mountpoint = "/dev/sdf" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + if FLAGS.fake_tests: + db.volume_attached(self.context, volume_id, instance_id, + mountpoint) + else: + self.compute.attach_volume(self.context, + instance_id, + volume_id, + mountpoint) + vol = db.volume_get(context.get_admin_context(), 
volume_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + self.assertEqual(vol['mountpoint'], mountpoint) + instance_ref = db.volume_get_instance(self.context, volume_id) + self.assertEqual(instance_ref['id'], instance_id) + + self.assertRaises(exception.Error, + self.volume.delete_volume, + self.context, + volume_id) + if FLAGS.fake_tests: + db.volume_detached(self.context, volume_id) + else: + self.compute.detach_volume(self.context, + instance_id, + volume_id) + vol = db.volume_get(self.context, volume_id) + self.assertEqual(vol['status'], "available") + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.Error, + db.volume_get, + self.context, + volume_id) + db.instance_destroy(self.context, instance_id) + + def test_concurrent_volumes_get_different_targets(self): + """Ensure multiple concurrent volumes get different targets.""" + volume_ids = [] + targets = [] + + def _check(volume_id): + """Make sure targets aren't duplicated.""" + volume_ids.append(volume_id) + admin_context = context.get_admin_context() + iscsi_target = db.volume_get_iscsi_target_num(admin_context, + volume_id) + self.assert_(iscsi_target not in targets) + targets.append(iscsi_target) + logging.debug("Target %s allocated", iscsi_target) + total_slots = FLAGS.iscsi_num_targets + for _index in xrange(total_slots): + volume_id = self._create_volume() + d = self.volume.create_volume(self.context, volume_id) + _check(d) + for volume_id in volume_ids: + self.volume.delete_volume(self.context, volume_id) + + def test_multi_node(self): + # TODO(termie): Figure out how to test with two nodes, + # each of them having a different FLAG for storage_node + # This will allow us to test cross-node interactions + pass diff --git a/nova/tests/twistd_unittest.py b/nova/tests/twistd_unittest.py deleted file mode 100644 index 75007b9c8..000000000 --- a/nova/tests/twistd_unittest.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import StringIO -import sys - -from nova import twistd -from nova import exception -from nova import flags -from nova import test - - -FLAGS = flags.FLAGS - - -class TwistdTestCase(test.TrialTestCase): - def setUp(self): - super(TwistdTestCase, self).setUp() - self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions) - sys.stdout = StringIO.StringIO() - - def tearDown(self): - super(TwistdTestCase, self).tearDown() - sys.stdout = sys.__stdout__ - - def test_basic(self): - options = self.Options() - argv = options.parseOptions() - - def test_logfile(self): - options = self.Options() - argv = options.parseOptions(['--logfile=foo']) - self.assertEqual(FLAGS.logfile, 'foo') - - def test_help(self): - options = self.Options() - self.assertRaises(SystemExit, options.parseOptions, ['--help']) - self.assert_('pidfile' in sys.stdout.getvalue()) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py deleted file mode 100644 index 85e569858..000000000 --- a/nova/tests/virt_unittest.py +++ /dev/null @@ -1,258 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2010 OpenStack LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from xml.etree.ElementTree import fromstring as xml_to_tree -from xml.dom.minidom import parseString as xml_to_dom - -from nova import context -from nova import db -from nova import flags -from nova import test -from nova import utils -from nova.api.ec2 import cloud -from nova.auth import manager -from nova.virt import libvirt_conn - -FLAGS = flags.FLAGS -flags.DECLARE('instances_path', 'nova.compute.manager') - - -class LibvirtConnTestCase(test.TestCase): - def setUp(self): - super(LibvirtConnTestCase, self).setUp() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.network = utils.import_object(FLAGS.network_manager) - FLAGS.instances_path = '' - - def test_get_uri_and_template(self): - ip = '10.11.12.13' - - instance = {'internal_id': 1, - 'memory_kb': '1024000', - 'basepath': '/some/path', - 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', - 'vcpus': 2, - 'project_id': 'fake', - 'bridge': 'br101', - 'instance_type': 'm1.small'} - - user_context = context.RequestContext(project=self.project, - user=self.user) - instance_ref = db.instance_create(user_context, instance) - network_ref = self.network.get_network(user_context) - self.network.set_network_host(context.get_admin_context(), - network_ref['id']) - - fixed_ip = {'address': ip, - 'network_id': network_ref['id']} - - ctxt = context.get_admin_context() - fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - db.fixed_ip_update(ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) - - type_uri_map = {'qemu': ('qemu:///system', - [(lambda t: t.find('.').get('type'), 'qemu'), - (lambda t: t.find('./os/type').text, 'hvm'), - (lambda t: t.find('./devices/emulator'), None)]), - 'kvm': ('qemu:///system', - [(lambda t: 
t.find('.').get('type'), 'kvm'), - (lambda t: t.find('./os/type').text, 'hvm'), - (lambda t: t.find('./devices/emulator'), None)]), - 'uml': ('uml:///system', - [(lambda t: t.find('.').get('type'), 'uml'), - (lambda t: t.find('./os/type').text, 'uml')])} - - common_checks = [ - (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('name'), 'IP'), - (lambda t: t.find('./devices/interface/filterref/parameter').\ - get('value'), '10.11.12.13')] - - for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) - - uri, _template, _rescue = conn.get_uri_and_templates() - self.assertEquals(uri, expected_uri) - - xml = conn.to_xml(instance_ref) - tree = xml_to_tree(xml) - for i, (check, expected_result) in enumerate(checks): - self.assertEqual(check(tree), - expected_result, - '%s failed check %d' % (xml, i)) - - for i, (check, expected_result) in enumerate(common_checks): - self.assertEqual(check(tree), - expected_result, - '%s failed common check %d' % (xml, i)) - - # Deliberately not just assigning this string to FLAGS.libvirt_uri and - # checking against that later on. This way we make sure the - # implementation doesn't fiddle around with the FLAGS. - testuri = 'something completely different' - FLAGS.libvirt_uri = testuri - for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) - uri, _template, _rescue = conn.get_uri_and_templates() - self.assertEquals(uri, testuri) - - def tearDown(self): - super(LibvirtConnTestCase, self).tearDown() - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - - -class NWFilterTestCase(test.TestCase): - - def setUp(self): - super(NWFilterTestCase, self).setUp() - - class Mock(object): - pass - - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext(self.user, self.project) - - self.fake_libvirt_connection = Mock() - - self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - - def test_cidr_rule_nwfilter_xml(self): - cloud_controller = cloud.CloudController() - cloud_controller.create_security_group(self.context, - 'testgroup', - 'test group description') - cloud_controller.authorize_security_group_ingress(self.context, - 'testgroup', - from_port='80', - to_port='81', - ip_protocol='tcp', - cidr_ip='0.0.0.0/0') - - security_group = db.security_group_get_by_name(self.context, - 'fake', - 'testgroup') - - xml = self.fw.security_group_to_nwfilter_xml(security_group.id) - - dom = xml_to_dom(xml) - self.assertEqual(dom.firstChild.tagName, 'filter') - - rules = dom.getElementsByTagName('rule') - self.assertEqual(len(rules), 1) - - # It's supposed to allow inbound traffic. 
- self.assertEqual(rules[0].getAttribute('action'), 'accept') - self.assertEqual(rules[0].getAttribute('direction'), 'in') - - # Must be lower priority than the base filter (which blocks everything) - self.assertTrue(int(rules[0].getAttribute('priority')) < 1000) - - ip_conditions = rules[0].getElementsByTagName('tcp') - self.assertEqual(len(ip_conditions), 1) - self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0') - self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0') - self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80') - self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81') - self.teardown_security_group() - - def teardown_security_group(self): - cloud_controller = cloud.CloudController() - cloud_controller.delete_security_group(self.context, 'testgroup') - - def setup_and_return_security_group(self): - cloud_controller = cloud.CloudController() - cloud_controller.create_security_group(self.context, - 'testgroup', - 'test group description') - cloud_controller.authorize_security_group_ingress(self.context, - 'testgroup', - from_port='80', - to_port='81', - ip_protocol='tcp', - cidr_ip='0.0.0.0/0') - - return db.security_group_get_by_name(self.context, 'fake', 'testgroup') - - def test_creates_base_rule_first(self): - # These come pre-defined by libvirt - self.defined_filters = ['no-mac-spoofing', - 'no-ip-spoofing', - 'no-arp-spoofing', - 'allow-dhcp-server'] - - self.recursive_depends = {} - for f in self.defined_filters: - self.recursive_depends[f] = [] - - def _filterDefineXMLMock(xml): - dom = xml_to_dom(xml) - name = dom.firstChild.getAttribute('name') - self.recursive_depends[name] = [] - for f in dom.getElementsByTagName('filterref'): - ref = f.getAttribute('filter') - self.assertTrue(ref in self.defined_filters, - ('%s referenced filter that does ' + - 'not yet exist: %s') % (name, ref)) - dependencies = [ref] + self.recursive_depends[ref] - self.recursive_depends[name] += dependencies - - self.defined_filters.append(name) - return True - - self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock - - instance_ref = db.instance_create(self.context, - {'user_id': 'fake', - 'project_id': 'fake'}) - inst_id = instance_ref['id'] - - def _ensure_all_called(): - instance_filter = 'nova-instance-%s' % instance_ref['name'] - secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] - for required in [secgroup_filter, 'allow-dhcp-server', - 'no-arp-spoofing', 'no-ip-spoofing', - 'no-mac-spoofing']: - self.assertTrue(required in - self.recursive_depends[instance_filter], - "Instance's filter does not include %s" % - required) - - self.security_group = self.setup_and_return_security_group() - - db.instance_add_security_group(self.context, inst_id, - self.security_group.id) - instance = db.instance_get(self.context, inst_id) - - d = self.fw.setup_nwfilters_for_instance(instance) - _ensure_all_called() - self.teardown_security_group() - return d diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py deleted file mode 100644 index b13455fb0..000000000 --- a/nova/tests/volume_unittest.py +++ /dev/null @@ -1,175 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Volume Code. - -""" -import logging - -from nova import context -from nova import exception -from nova import db -from nova import flags -from nova import test -from nova import utils - -FLAGS = flags.FLAGS - - -class VolumeTestCase(test.TestCase): - """Test Case for volumes.""" - - def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) - super(VolumeTestCase, self).setUp() - self.compute = utils.import_object(FLAGS.compute_manager) - self.flags(connection_type='fake') - self.volume = utils.import_object(FLAGS.volume_manager) - self.context = context.get_admin_context() - - @staticmethod - def _create_volume(size='0'): - """Create a volume object.""" - vol = {} - vol['size'] = size - vol['user_id'] = 'fake' - vol['project_id'] = 'fake' - vol['availability_zone'] = FLAGS.storage_availability_zone - vol['status'] = "creating" - vol['attach_status'] = "detached" - return db.volume_create(context.get_admin_context(), vol)['id'] - - def test_create_delete_volume(self): - """Test volume can be created and deleted.""" - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), - volume_id).id) - - self.volume.delete_volume(self.context, volume_id) - self.assertRaises(exception.NotFound, - db.volume_get, - self.context, - volume_id) - - def test_too_big_volume(self): - """Ensure failure if a too large of a volume is requested.""" - # FIXME(vish): validation needs to move into the data layer in - # volume_create - return True - try: - volume_id = self._create_volume('1001') - self.volume.create_volume(self.context, volume_id) - self.fail("Should have thrown TypeError") - except TypeError: - pass - - def test_too_many_volumes(self): - """Ensure that NoMoreTargets is raised when we run out of volumes.""" - vols = [] - total_slots = FLAGS.iscsi_num_targets - for _index in xrange(total_slots): - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - vols.append(volume_id) - volume_id = self._create_volume() - self.assertRaises(db.NoMoreTargets, - self.volume.create_volume, - self.context, - volume_id) - db.volume_destroy(context.get_admin_context(), volume_id) - for volume_id in vols: - self.volume.delete_volume(self.context, volume_id) - - def test_run_attach_detach_volume(self): - """Make sure volume can be attached and detached from instance.""" - inst = {} - inst['image_id'] = 'ami-test' - inst['reservation_id'] = 'r-fakeres' - inst['launch_time'] = '10' - inst['user_id'] = 'fake' - inst['project_id'] = 'fake' - inst['instance_type'] = 'm1.tiny' - inst['mac_address'] = utils.generate_mac() - inst['ami_launch_index'] = 0 - instance_id = db.instance_create(self.context, inst)['id'] - mountpoint = "/dev/sdf" - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - if FLAGS.fake_tests: - db.volume_attached(self.context, volume_id, instance_id, - mountpoint) - else: - self.compute.attach_volume(self.context, - instance_id, - volume_id, - mountpoint) - vol = db.volume_get(context.get_admin_context(), 
volume_id) - self.assertEqual(vol['status'], "in-use") - self.assertEqual(vol['attach_status'], "attached") - self.assertEqual(vol['mountpoint'], mountpoint) - instance_ref = db.volume_get_instance(self.context, volume_id) - self.assertEqual(instance_ref['id'], instance_id) - - self.assertRaises(exception.Error, - self.volume.delete_volume, - self.context, - volume_id) - if FLAGS.fake_tests: - db.volume_detached(self.context, volume_id) - else: - self.compute.detach_volume(self.context, - instance_id, - volume_id) - vol = db.volume_get(self.context, volume_id) - self.assertEqual(vol['status'], "available") - - self.volume.delete_volume(self.context, volume_id) - self.assertRaises(exception.Error, - db.volume_get, - self.context, - volume_id) - db.instance_destroy(self.context, instance_id) - - def test_concurrent_volumes_get_different_targets(self): - """Ensure multiple concurrent volumes get different targets.""" - volume_ids = [] - targets = [] - - def _check(volume_id): - """Make sure targets aren't duplicated.""" - volume_ids.append(volume_id) - admin_context = context.get_admin_context() - iscsi_target = db.volume_get_iscsi_target_num(admin_context, - volume_id) - self.assert_(iscsi_target not in targets) - targets.append(iscsi_target) - logging.debug("Target %s allocated", iscsi_target) - total_slots = FLAGS.iscsi_num_targets - for _index in xrange(total_slots): - volume_id = self._create_volume() - d = self.volume.create_volume(self.context, volume_id) - _check(d) - for volume_id in volume_ids: - self.volume.delete_volume(self.context, volume_id) - - def test_multi_node(self): - # TODO(termie): Figure out how to test with two nodes, - # each of them having a different FLAG for storage_node - # This will allow us to test cross-node interactions - pass diff --git a/run_tests.py b/run_tests.py deleted file mode 100644 index 6a4b7f1ab..000000000 --- a/run_tests.py +++ /dev/null @@ -1,125 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This is our basic test running framework based on Twisted's Trial. - -Usage Examples: - - # to run all the tests - python run_tests.py - - # to run a specific test suite imported here - python run_tests.py NodeConnectionTestCase - - # to run a specific test imported here - python run_tests.py NodeConnectionTestCase.test_reboot - - # to run some test suites elsewhere - python run_tests.py nova.tests.node_unittest - python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase - -Due to our use of multiprocessing it we frequently get some ignorable -'Interrupted system call' exceptions after test completion. 
- -""" - -import eventlet -eventlet.monkey_patch() - -import __main__ -import gettext -import os -import sys - -gettext.install('nova', unicode=1) - -from twisted.scripts import trial as trial_script - -from nova import flags -from nova import twistd - -from nova.tests.access_unittest import * -from nova.tests.api_unittest import * -from nova.tests.auth_unittest import * -from nova.tests.cloud_unittest import * -from nova.tests.compute_unittest import * -from nova.tests.flags_unittest import * -from nova.tests.misc_unittest import * -from nova.tests.network_unittest import * -#from nova.tests.objectstore_unittest import * -from nova.tests.quota_unittest import * -from nova.tests.rpc_unittest import * -from nova.tests.scheduler_unittest import * -from nova.tests.service_unittest import * -from nova.tests.twistd_unittest import * -from nova.tests.virt_unittest import * -from nova.tests.volume_unittest import * - - -FLAGS = flags.FLAGS -flags.DEFINE_bool('flush_db', True, - 'Flush the database before running fake tests') -flags.DEFINE_string('tests_stderr', 'run_tests.err.log', - 'Path to where to pipe STDERR during test runs.' - ' Default = "run_tests.err.log"') - - -if __name__ == '__main__': - OptionsClass = twistd.WrapTwistedOptions(trial_script.Options) - config = OptionsClass() - argv = config.parseOptions() - - FLAGS.verbose = True - - # TODO(termie): these should make a call instead of doing work on import - if FLAGS.fake_tests: - from nova.tests.fake_flags import * - else: - from nova.tests.real_flags import * - - # Establish redirect for STDERR - sys.stderr.flush() - err = open(FLAGS.tests_stderr, 'w+', 0) - os.dup2(err.fileno(), sys.stderr.fileno()) - - if len(argv) == 1 and len(config['tests']) == 0: - # If no tests were specified run the ones imported in this file - # NOTE(termie): "tests" is not a flag, just some Trial related stuff - config['tests'].update(['__main__']) - elif len(config['tests']): - # If we specified tests check first whether they are in __main__ - for arg in config['tests']: - key = arg.split('.')[0] - if hasattr(__main__, key): - config['tests'].remove(arg) - config['tests'].add('__main__.%s' % arg) - - trial_script._initialDebugSetup(config) - trialRunner = trial_script._makeRunner(config) - suite = trial_script._getSuite(config) - if config['until-failure']: - test_result = trialRunner.runUntilFailure(suite) - else: - test_result = trialRunner.run(suite) - if config.tracer: - sys.settrace(None) - results = config.tracer.results() - results.write_results(show_missing=1, summary=False, - coverdir=config.coverdir) - sys.exit(not test_result.wasSuccessful()) -- cgit From b1d4579404f9e49fcdea23c21733fdf65edc1da3 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 16 Dec 2010 17:29:26 -0800 Subject: Fixed network test (thanks Vish!) and fixed run_tests.sh. 
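The rename from network_unittest.py to test_network.py matches nose's default
discovery pattern (test_* style names), and run_tests.sh now invokes nosetests
directly instead of the Trial-based run_tests.py; the '_trial_temp' component
is likewise dropped from the nova-dhcpbridge database path since the tests no
longer run under Trial's working directory. As a rough illustration only, not
part of this patch, the same collection that run_tests.sh performs with
"nosetests -v" can be driven from Python against the module introduced below:

    import nose

    # Roughly what run_tests.sh now does, limited to the renamed network tests.
    nose.run(argv=['nosetests', '-v', 'nova.tests.test_network'])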
--- bin/nova-dhcpbridge | 1 - nova/tests/network_unittest.py | 347 ----------------------------------------- nova/tests/test_network.py | 347 +++++++++++++++++++++++++++++++++++++++++ run_tests.sh | 12 +- 4 files changed, 355 insertions(+), 352 deletions(-) delete mode 100644 nova/tests/network_unittest.py create mode 100644 nova/tests/test_network.py diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 81b9b6dd3..828aba3d1 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -110,7 +110,6 @@ def main(): FLAGS.num_networks = 5 path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', - '_trial_temp', 'nova.sqlite')) FLAGS.sql_connection = 'sqlite:///%s' % path action = argv[1] diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py deleted file mode 100644 index bcac20585..000000000 --- a/nova/tests/network_unittest.py +++ /dev/null @@ -1,347 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for network code -""" -import IPy -import os -import logging - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import test -from nova import utils -from nova.auth import manager - -FLAGS = flags.FLAGS - - -class NetworkTestCase(test.TestCase): - """Test cases for network code""" - def setUp(self): - super(NetworkTestCase, self).setUp() - # NOTE(vish): if you change these flags, make sure to change the - # flags in the corresponding section in nova-dhcpbridge - self.flags(connection_type='fake', - fake_network=True, - network_size=16, - num_networks=5) - logging.getLogger().setLevel(logging.DEBUG) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('netuser', 'netuser', 'netuser') - self.projects = [] - self.network = utils.import_object(FLAGS.network_manager) - self.context = context.RequestContext(project=None, user=self.user) - for i in range(5): - name = 'project%s' % i - project = self.manager.create_project(name, 'netuser', name) - self.projects.append(project) - # create the necessary network data for the project - user_context = context.RequestContext(project=self.projects[i], - user=self.user) - network_ref = self.network.get_network(user_context) - self.network.set_network_host(context.get_admin_context(), - network_ref['id']) - instance_ref = self._create_instance(0) - self.instance_id = instance_ref['id'] - instance_ref = self._create_instance(1) - self.instance2_id = instance_ref['id'] - - def tearDown(self): - super(NetworkTestCase, self).tearDown() - # TODO(termie): this should really be instantiating clean datastores - # in between runs, one failure kills all the tests - db.instance_destroy(context.get_admin_context(), self.instance_id) - db.instance_destroy(context.get_admin_context(), self.instance2_id) - for project in 
self.projects: - self.manager.delete_project(project) - self.manager.delete_user(self.user) - - def _create_instance(self, project_num, mac=None): - if not mac: - mac = utils.generate_mac() - project = self.projects[project_num] - self.context._project = project - self.context.project_id = project.id - return db.instance_create(self.context, - {'project_id': project.id, - 'mac_address': mac}) - - def _create_address(self, project_num, instance_id=None): - """Create an address in given project num""" - if instance_id is None: - instance_id = self.instance_id - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - return self.network.allocate_fixed_ip(self.context, instance_id) - - def _deallocate_address(self, project_num, address): - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - self.network.deallocate_fixed_ip(self.context, address) - - def test_public_network_association(self): - """Makes sure that we can allocaate a public ip""" - # TODO(vish): better way of adding floating ips - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - pubnet = IPy.IP(flags.FLAGS.floating_range) - address = str(pubnet[0]) - try: - db.floating_ip_get_by_address(context.get_admin_context(), address) - except exception.NotFound: - db.floating_ip_create(context.get_admin_context(), - {'address': address, - 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.projects[0].id) - fix_addr = self._create_address(0) - lease_ip(fix_addr) - self.assertEqual(float_addr, str(pubnet[0])) - self.network.associate_floating_ip(self.context, float_addr, fix_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, float_addr) - self.network.disassociate_floating_ip(self.context, float_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - self.network.deallocate_floating_ip(self.context, float_addr) - self.network.deallocate_fixed_ip(self.context, fix_addr) - release_ip(fix_addr) - db.floating_ip_destroy(context.get_admin_context(), float_addr) - - def test_allocate_deallocate_fixed_ip(self): - """Makes sure that we can allocate and deallocate a fixed ip""" - address = self._create_address(0) - self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - lease_ip(address) - self._deallocate_address(0, address) - - # Doesn't go away until it's dhcp released - self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - - release_ip(address) - self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) - - def test_side_effects(self): - """Ensures allocating and releasing has no side effects""" - address = self._create_address(0) - address2 = self._create_address(1, self.instance2_id) - - self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) - self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) - self.assertFalse(is_allocated_in_project(address, self.projects[1].id)) - - # Addresses are allocated before they're issued - lease_ip(address) - lease_ip(address2) - - self._deallocate_address(0, address) - release_ip(address) - self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) - - # First address release shouldn't affect the second - self.assertTrue(is_allocated_in_project(address2, 
self.projects[1].id)) - - self._deallocate_address(1, address2) - release_ip(address2) - self.assertFalse(is_allocated_in_project(address2, - self.projects[1].id)) - - def test_subnet_edge(self): - """Makes sure that private ips don't overlap""" - first = self._create_address(0) - lease_ip(first) - instance_ids = [] - for i in range(1, 5): - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address2 = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address3 = self._create_address(i, instance_ref['id']) - lease_ip(address) - lease_ip(address2) - lease_ip(address3) - self.context._project = self.projects[i] - self.context.project_id = self.projects[i].id - self.assertFalse(is_allocated_in_project(address, - self.projects[0].id)) - self.assertFalse(is_allocated_in_project(address2, - self.projects[0].id)) - self.assertFalse(is_allocated_in_project(address3, - self.projects[0].id)) - self.network.deallocate_fixed_ip(self.context, address) - self.network.deallocate_fixed_ip(self.context, address2) - self.network.deallocate_fixed_ip(self.context, address3) - release_ip(address) - release_ip(address2) - release_ip(address3) - for instance_id in instance_ids: - db.instance_destroy(context.get_admin_context(), instance_id) - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - self.network.deallocate_fixed_ip(self.context, first) - self._deallocate_address(0, first) - release_ip(first) - - def test_vpn_ip_and_port_looks_valid(self): - """Ensure the vpn ip and port are reasonable""" - self.assert_(self.projects[0].vpn_ip) - self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start) - self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start + - FLAGS.num_networks) - - def test_too_many_networks(self): - """Ensure error is raised if we run out of networks""" - projects = [] - networks_left = (FLAGS.num_networks - - db.network_count(context.get_admin_context())) - for i in range(networks_left): - project = self.manager.create_project('many%s' % i, self.user) - projects.append(project) - db.project_get_network(context.get_admin_context(), project.id) - project = self.manager.create_project('last', self.user) - projects.append(project) - self.assertRaises(db.NoMoreNetworks, - db.project_get_network, - context.get_admin_context(), - project.id) - for project in projects: - self.manager.delete_project(project) - - def test_ips_are_reused(self): - """Makes sure that ip addresses that are deallocated get reused""" - address = self._create_address(0) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address) - release_ip(address) - - address2 = self._create_address(0) - self.assertEqual(address, address2) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address2) - release_ip(address) - - def test_available_ips(self): - """Make sure the number of available ips for the network is correct - - The number of available IP addresses depends on the test - environment's setup. - - Network size is set in test fixture's setUp method. - - There are ips reserved at the bottom and top of the range. 
- services (network, gateway, CloudPipe, broadcast) - """ - network = db.project_get_network(context.get_admin_context(), - self.projects[0].id) - net_size = flags.FLAGS.network_size - admin_context = context.get_admin_context() - total_ips = (db.network_count_available_ips(admin_context, - network['id']) + - db.network_count_reserved_ips(admin_context, - network['id']) + - db.network_count_allocated_ips(admin_context, - network['id'])) - self.assertEqual(total_ips, net_size) - - def test_too_many_addresses(self): - """Test for a NoMoreAddresses exception when all fixed ips are used. - """ - admin_context = context.get_admin_context() - network = db.project_get_network(admin_context, self.projects[0].id) - num_available_ips = db.network_count_available_ips(admin_context, - network['id']) - addresses = [] - instance_ids = [] - for i in range(num_available_ips): - instance_ref = self._create_instance(0) - instance_ids.append(instance_ref['id']) - address = self._create_address(0, instance_ref['id']) - addresses.append(address) - lease_ip(address) - - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, 0) - self.assertRaises(db.NoMoreAddresses, - self.network.allocate_fixed_ip, - self.context, - 'foo') - - for i in range(num_available_ips): - self.network.deallocate_fixed_ip(self.context, addresses[i]) - release_ip(addresses[i]) - db.instance_destroy(context.get_admin_context(), instance_ids[i]) - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, num_available_ips) - - -def is_allocated_in_project(address, project_id): - """Returns true if address is in specified project""" - project_net = db.project_get_network(context.get_admin_context(), - project_id) - network = db.fixed_ip_get_network(context.get_admin_context(), address) - instance = db.fixed_ip_get_instance(context.get_admin_context(), address) - # instance exists until release - return instance is not None and network['id'] == project_net['id'] - - -def binpath(script): - """Returns the absolute path to a script in bin""" - return os.path.abspath(os.path.join(__file__, "../../../bin", script)) - - -def lease_ip(private_ip): - """Run add command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("ISSUE_IP: %s, %s ", out, err) - - -def release_ip(private_ip): - """Run del command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), - instance_ref['mac_address'], - private_ip) - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(cmd, addl_env=env) - logging.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py new file mode 100644 index 000000000..bcac20585 --- /dev/null +++ b/nova/tests/test_network.py @@ -0,0 +1,347 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# 
Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for network code +""" +import IPy +import os +import logging + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.auth import manager + +FLAGS = flags.FLAGS + + +class NetworkTestCase(test.TestCase): + """Test cases for network code""" + def setUp(self): + super(NetworkTestCase, self).setUp() + # NOTE(vish): if you change these flags, make sure to change the + # flags in the corresponding section in nova-dhcpbridge + self.flags(connection_type='fake', + fake_network=True, + network_size=16, + num_networks=5) + logging.getLogger().setLevel(logging.DEBUG) + self.manager = manager.AuthManager() + self.user = self.manager.create_user('netuser', 'netuser', 'netuser') + self.projects = [] + self.network = utils.import_object(FLAGS.network_manager) + self.context = context.RequestContext(project=None, user=self.user) + for i in range(5): + name = 'project%s' % i + project = self.manager.create_project(name, 'netuser', name) + self.projects.append(project) + # create the necessary network data for the project + user_context = context.RequestContext(project=self.projects[i], + user=self.user) + network_ref = self.network.get_network(user_context) + self.network.set_network_host(context.get_admin_context(), + network_ref['id']) + instance_ref = self._create_instance(0) + self.instance_id = instance_ref['id'] + instance_ref = self._create_instance(1) + self.instance2_id = instance_ref['id'] + + def tearDown(self): + super(NetworkTestCase, self).tearDown() + # TODO(termie): this should really be instantiating clean datastores + # in between runs, one failure kills all the tests + db.instance_destroy(context.get_admin_context(), self.instance_id) + db.instance_destroy(context.get_admin_context(), self.instance2_id) + for project in self.projects: + self.manager.delete_project(project) + self.manager.delete_user(self.user) + + def _create_instance(self, project_num, mac=None): + if not mac: + mac = utils.generate_mac() + project = self.projects[project_num] + self.context._project = project + self.context.project_id = project.id + return db.instance_create(self.context, + {'project_id': project.id, + 'mac_address': mac}) + + def _create_address(self, project_num, instance_id=None): + """Create an address in given project num""" + if instance_id is None: + instance_id = self.instance_id + self.context._project = self.projects[project_num] + self.context.project_id = self.projects[project_num].id + return self.network.allocate_fixed_ip(self.context, instance_id) + + def _deallocate_address(self, project_num, address): + self.context._project = self.projects[project_num] + self.context.project_id = self.projects[project_num].id + self.network.deallocate_fixed_ip(self.context, address) + + 
def test_public_network_association(self): + """Makes sure that we can allocaate a public ip""" + # TODO(vish): better way of adding floating ips + self.context._project = self.projects[0] + self.context.project_id = self.projects[0].id + pubnet = IPy.IP(flags.FLAGS.floating_range) + address = str(pubnet[0]) + try: + db.floating_ip_get_by_address(context.get_admin_context(), address) + except exception.NotFound: + db.floating_ip_create(context.get_admin_context(), + {'address': address, + 'host': FLAGS.host}) + float_addr = self.network.allocate_floating_ip(self.context, + self.projects[0].id) + fix_addr = self._create_address(0) + lease_ip(fix_addr) + self.assertEqual(float_addr, str(pubnet[0])) + self.network.associate_floating_ip(self.context, float_addr, fix_addr) + address = db.instance_get_floating_address(context.get_admin_context(), + self.instance_id) + self.assertEqual(address, float_addr) + self.network.disassociate_floating_ip(self.context, float_addr) + address = db.instance_get_floating_address(context.get_admin_context(), + self.instance_id) + self.assertEqual(address, None) + self.network.deallocate_floating_ip(self.context, float_addr) + self.network.deallocate_fixed_ip(self.context, fix_addr) + release_ip(fix_addr) + db.floating_ip_destroy(context.get_admin_context(), float_addr) + + def test_allocate_deallocate_fixed_ip(self): + """Makes sure that we can allocate and deallocate a fixed ip""" + address = self._create_address(0) + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + lease_ip(address) + self._deallocate_address(0, address) + + # Doesn't go away until it's dhcp released + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + + release_ip(address) + self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) + + def test_side_effects(self): + """Ensures allocating and releasing has no side effects""" + address = self._create_address(0) + address2 = self._create_address(1, self.instance2_id) + + self.assertTrue(is_allocated_in_project(address, self.projects[0].id)) + self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) + self.assertFalse(is_allocated_in_project(address, self.projects[1].id)) + + # Addresses are allocated before they're issued + lease_ip(address) + lease_ip(address2) + + self._deallocate_address(0, address) + release_ip(address) + self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) + + # First address release shouldn't affect the second + self.assertTrue(is_allocated_in_project(address2, self.projects[1].id)) + + self._deallocate_address(1, address2) + release_ip(address2) + self.assertFalse(is_allocated_in_project(address2, + self.projects[1].id)) + + def test_subnet_edge(self): + """Makes sure that private ips don't overlap""" + first = self._create_address(0) + lease_ip(first) + instance_ids = [] + for i in range(1, 5): + instance_ref = self._create_instance(i, mac=utils.generate_mac()) + instance_ids.append(instance_ref['id']) + address = self._create_address(i, instance_ref['id']) + instance_ref = self._create_instance(i, mac=utils.generate_mac()) + instance_ids.append(instance_ref['id']) + address2 = self._create_address(i, instance_ref['id']) + instance_ref = self._create_instance(i, mac=utils.generate_mac()) + instance_ids.append(instance_ref['id']) + address3 = self._create_address(i, instance_ref['id']) + lease_ip(address) + lease_ip(address2) + lease_ip(address3) + self.context._project = self.projects[i] + self.context.project_id = 
self.projects[i].id + self.assertFalse(is_allocated_in_project(address, + self.projects[0].id)) + self.assertFalse(is_allocated_in_project(address2, + self.projects[0].id)) + self.assertFalse(is_allocated_in_project(address3, + self.projects[0].id)) + self.network.deallocate_fixed_ip(self.context, address) + self.network.deallocate_fixed_ip(self.context, address2) + self.network.deallocate_fixed_ip(self.context, address3) + release_ip(address) + release_ip(address2) + release_ip(address3) + for instance_id in instance_ids: + db.instance_destroy(context.get_admin_context(), instance_id) + self.context._project = self.projects[0] + self.context.project_id = self.projects[0].id + self.network.deallocate_fixed_ip(self.context, first) + self._deallocate_address(0, first) + release_ip(first) + + def test_vpn_ip_and_port_looks_valid(self): + """Ensure the vpn ip and port are reasonable""" + self.assert_(self.projects[0].vpn_ip) + self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start) + self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start + + FLAGS.num_networks) + + def test_too_many_networks(self): + """Ensure error is raised if we run out of networks""" + projects = [] + networks_left = (FLAGS.num_networks - + db.network_count(context.get_admin_context())) + for i in range(networks_left): + project = self.manager.create_project('many%s' % i, self.user) + projects.append(project) + db.project_get_network(context.get_admin_context(), project.id) + project = self.manager.create_project('last', self.user) + projects.append(project) + self.assertRaises(db.NoMoreNetworks, + db.project_get_network, + context.get_admin_context(), + project.id) + for project in projects: + self.manager.delete_project(project) + + def test_ips_are_reused(self): + """Makes sure that ip addresses that are deallocated get reused""" + address = self._create_address(0) + lease_ip(address) + self.network.deallocate_fixed_ip(self.context, address) + release_ip(address) + + address2 = self._create_address(0) + self.assertEqual(address, address2) + lease_ip(address) + self.network.deallocate_fixed_ip(self.context, address2) + release_ip(address) + + def test_available_ips(self): + """Make sure the number of available ips for the network is correct + + The number of available IP addresses depends on the test + environment's setup. + + Network size is set in test fixture's setUp method. + + There are ips reserved at the bottom and top of the range. + services (network, gateway, CloudPipe, broadcast) + """ + network = db.project_get_network(context.get_admin_context(), + self.projects[0].id) + net_size = flags.FLAGS.network_size + admin_context = context.get_admin_context() + total_ips = (db.network_count_available_ips(admin_context, + network['id']) + + db.network_count_reserved_ips(admin_context, + network['id']) + + db.network_count_allocated_ips(admin_context, + network['id'])) + self.assertEqual(total_ips, net_size) + + def test_too_many_addresses(self): + """Test for a NoMoreAddresses exception when all fixed ips are used. 
+ """ + admin_context = context.get_admin_context() + network = db.project_get_network(admin_context, self.projects[0].id) + num_available_ips = db.network_count_available_ips(admin_context, + network['id']) + addresses = [] + instance_ids = [] + for i in range(num_available_ips): + instance_ref = self._create_instance(0) + instance_ids.append(instance_ref['id']) + address = self._create_address(0, instance_ref['id']) + addresses.append(address) + lease_ip(address) + + ip_count = db.network_count_available_ips(context.get_admin_context(), + network['id']) + self.assertEqual(ip_count, 0) + self.assertRaises(db.NoMoreAddresses, + self.network.allocate_fixed_ip, + self.context, + 'foo') + + for i in range(num_available_ips): + self.network.deallocate_fixed_ip(self.context, addresses[i]) + release_ip(addresses[i]) + db.instance_destroy(context.get_admin_context(), instance_ids[i]) + ip_count = db.network_count_available_ips(context.get_admin_context(), + network['id']) + self.assertEqual(ip_count, num_available_ips) + + +def is_allocated_in_project(address, project_id): + """Returns true if address is in specified project""" + project_net = db.project_get_network(context.get_admin_context(), + project_id) + network = db.fixed_ip_get_network(context.get_admin_context(), address) + instance = db.fixed_ip_get_instance(context.get_admin_context(), address) + # instance exists until release + return instance is not None and network['id'] == project_net['id'] + + +def binpath(script): + """Returns the absolute path to a script in bin""" + return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + + +def lease_ip(private_ip): + """Run add command on dhcpbridge""" + network_ref = db.fixed_ip_get_network(context.get_admin_context(), + private_ip) + instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), + private_ip) + cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), + instance_ref['mac_address'], + private_ip) + env = {'DNSMASQ_INTERFACE': network_ref['bridge'], + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("ISSUE_IP: %s, %s ", out, err) + + +def release_ip(private_ip): + """Run del command on dhcpbridge""" + network_ref = db.fixed_ip_get_network(context.get_admin_context(), + private_ip) + instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), + private_ip) + cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), + instance_ref['mac_address'], + private_ip) + env = {'DNSMASQ_INTERFACE': network_ref['bridge'], + 'TESTING': '1', + 'FLAGFILE': FLAGS.dhcpbridge_flagfile} + (out, err) = utils.execute(cmd, addl_env=env) + logging.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/run_tests.sh b/run_tests.sh index a11dcd7cc..67214996d 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -36,7 +36,8 @@ done if [ $never_venv -eq 1 ]; then # Just run the test suites in current environment - python run_tests.py + rm -f nova.sqlite + nosetests -v exit fi @@ -47,7 +48,8 @@ if [ $force -eq 1 ]; then fi if [ -e ${venv} ]; then - ${with_venv} python run_tests.py $@ + ${with_venv} rm -f nova.sqlite + ${with_venv} nosetests -v $@ else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv @@ -59,9 +61,11 @@ else # Install the virtualenv and run the test suite in it python tools/install_venv.py else - python run_tests.py + rm -f nova.sqlite + nosetests -v exit fi fi - ${with_venv} python run_tests.py $@ + ${with_venv} rm -f nova.sqlite + ${with_venv} nosetests -v $@ fi -- cgit From 
ca81b0c12a3853942e9ce85154c38dad381ead0e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 18 Dec 2010 00:50:49 +0000 Subject: fixed unittests and further clean-up post-eventlet merge --- nova/tests/xenapi_unittest.py | 48 +++++++++++++-------------------------- nova/virt/xenapi/__init__.py | 2 +- nova/virt/xenapi/network_utils.py | 2 +- nova/virt/xenapi/vm_utils.py | 3 +-- nova/virt/xenapi/vmops.py | 8 ++++--- nova/virt/xenapi/volume_utils.py | 2 +- nova/virt/xenapi/volumeops.py | 1 - nova/virt/xenapi_conn.py | 8 +++---- 8 files changed, 29 insertions(+), 45 deletions(-) diff --git a/nova/tests/xenapi_unittest.py b/nova/tests/xenapi_unittest.py index 839d6aa44..74401ce5d 100644 --- a/nova/tests/xenapi_unittest.py +++ b/nova/tests/xenapi_unittest.py @@ -78,13 +78,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper = volume_utils.VolumeHelper helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() - info = yield helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') + info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc') label = 'SR-%s' % vol['ec2_id'] description = 'Test-SR' - sr_ref = helper.create_iscsi_storage_blocking(session, - info, - label, - description) + sr_ref = helper.create_iscsi_storage(session, info, label, description) srs = fake.get_all('SR') self.assertEqual(sr_ref, srs[0]) db.volume_destroy(context.get_admin_context(), vol['id']) @@ -97,13 +94,10 @@ class XenAPIVolumeTestCase(test.TrialTestCase): helper.XenAPI = session.get_imported_xenapi() vol = self._create_volume() # oops, wrong mount point! - info = helper.parse_volume_info(vol['ec2_id'], '/dev/sd') - - def check(exc): - """ handler """ - self.assertIsInstance(exc.value, volume_utils.StorageError) - - info.addErrback(check) + self.assertRaises(volume_utils.StorageError, + helper.parse_volume_info, + vol['ec2_id'], + '/dev/sd') db.volume_destroy(context.get_admin_context(), vol['id']) def test_attach_volume(self): @@ -116,8 +110,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): result = conn.attach_volume(instance.name, volume['ec2_id'], '/dev/sdc') - def check(_): - """ handler """ + def check(): # check that the VM has a VBD attached to it # Get XenAPI reference for the VM vms = fake.get_all('VM') @@ -127,8 +120,7 @@ class XenAPIVolumeTestCase(test.TrialTestCase): vm_ref = vbd['VM'] self.assertEqual(vm_ref, vms[0]) - result.addCallback(check) - return result + check() def test_attach_volume_raise_exception(self): """ This shows how to test when exceptions are raised """ @@ -138,17 +130,11 @@ class XenAPIVolumeTestCase(test.TrialTestCase): volume = self._create_volume() instance = db.instance_create(self.values) fake.create_vm(instance.name, 'Running') - result = conn.attach_volume(instance.name, volume['ec2_id'], - '/dev/sdc') - - def check_exception(exc): - """ handler """ - if exc: - pass - else: - self.fail('Oops, no exception has been raised!') - result.addErrback(check_exception) - return result + self.assertRaises(Exception, + conn.attach_volume, + instance.name, + volume['ec2_id'], + '/dev/sdc') def tearDown(self): super(XenAPIVolumeTestCase, self).tearDown() @@ -192,10 +178,9 @@ class XenAPIVMTestCase(test.TrialTestCase): } conn = xenapi_conn.get_connection(False) instance = db.instance_create(values) - result = conn.spawn(instance) + conn.spawn(instance) - def check(_): - """ handler """ + def check(): instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -225,8 +210,7 @@ class XenAPIVMTestCase(test.TrialTestCase): # Check that the VM is running 
according to XenAPI. self.assertEquals(vm['power_state'], 'Running') - result.addCallback(check) - return result + check() def tearDown(self): super(XenAPIVMTestCase, self).tearDown() diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index c7038deae..c75162f08 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,7 +20,7 @@ """ -class HelperBase(): +class HelperBase(object): """ The base for helper classes. This adds the XenAPI class attribute """ diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index c292383b6..e783120fe 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -29,7 +29,7 @@ class NetworkHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(NetworkHelper, self).__init__() @classmethod def find_network_with_bridge(cls, session, bridge): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 57d419773..911fcc9b2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -24,7 +24,6 @@ import urllib from xml.dom import minidom from nova import flags -from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state @@ -48,7 +47,7 @@ class VMHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(VMHelper, self).__init__() @classmethod def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0be4ed07d..aa3a3ae53 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -22,7 +22,6 @@ import logging from nova import db from nova import context -from nova import flags from nova import exception from nova import utils @@ -78,6 +77,7 @@ class VMOps(object): logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) def _wait_for_boot(): @@ -88,7 +88,8 @@ class VMOps(object): if state == power_state.RUNNING: logging.debug('Instance %s: booted', instance['name']) timer.stop() - except: + except Exception, exc: + logging.warn(exc) logging.exception('instance %s: failed to boot', instance['name']) db.instance_set_state(context.get_admin_context(), @@ -131,6 +132,7 @@ class VMOps(object): self._session.wait_for_task(task) except self.XenAPI.Failure, exc: logging.warn(exc) + # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(task) @@ -149,7 +151,7 @@ class VMOps(object): """Return data about VM diagnostics""" vm = VMHelper.lookup(self._session, instance_id) if vm is None: - raise exception.Exception("Instance not found %s" % instance_id) + raise exception.NotFound("Instance not found %s" % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 255f23d88..5366078ce 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -43,7 +43,7 @@ class VolumeHelper(HelperBase): The class that wraps the helper methods together. 
""" def __init__(self): - return + super(VolumeHelper, self).__init__() @classmethod def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c7516073..9dbb1bb14 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -19,7 +19,6 @@ Management class for Storage-related functions (attach, detach, etc). """ import logging -from nova import flags from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 207222744..2a8614cfd 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -119,11 +119,11 @@ class XenAPIConnection(object): def spawn(self, instance): """ Create VM instance """ - return self._vmops.spawn(instance) + self._vmops.spawn(instance) def reboot(self, instance): """ Reboot VM instance """ - return self._vmops.reboot(instance) + self._vmops.reboot(instance) def destroy(self, instance): """ Destroy VM instance """ @@ -143,13 +143,13 @@ class XenAPIConnection(object): def attach_volume(self, instance_name, device_path, mountpoint): """ Attach volume storage to VM instance """ - return self._volumeops.attach_volume(instance_name, + self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): """ Detach volume storage to VM instance """ - return self._volumeops.detach_volume(instance_name, mountpoint) + self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): -- cgit From c273c2b93471ad0d3ab4990458147c253d22bdc5 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 23 Dec 2010 08:57:04 -0800 Subject: Added reference in setup.py so that python setup.py test works now. --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index d88bc1e6f..1abf4d9fe 100644 --- a/setup.py +++ b/setup.py @@ -58,6 +58,7 @@ setup(name='nova', 'build_sphinx' : local_BuildDoc }, packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, + test_suite='nose.collector', scripts=['bin/nova-api', 'bin/nova-compute', 'bin/nova-dhcpbridge', -- cgit