| author | Brian Waldon <bcwaldon@gmail.com> | 2012-01-13 11:22:50 -0800 |
|---|---|---|
| committer | Brian Waldon <bcwaldon@gmail.com> | 2012-01-13 16:59:50 -0800 |
| commit | bb10721ffc14abb86ab4d58b2b30fc676e88d394 | |
| tree | 22da01aef372d76efd580a6d6afe495bed23dd5c | |
| parent | a51c93ab3c646642330c4eeba3ede0772a8ab734 | |
Convert nova.volume.api.API to use volume objects
Change-Id: If6b78f7de814116bc93b273ec300dba02e63593d
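
The commit message is terse, so here is a minimal sketch of the calling convention this change moves to, based on the new method signatures in the nova/volume/api.py hunks below. The function name and the `context`, `volume_id`, `name`, and `description` parameters are placeholders for illustration, not part of this commit, and the snippet assumes a running Nova environment.

```python
from nova import volume


def volume_api_usage_sketch(context, volume_id, name, description):
    """Sketch: the object-based calling convention this commit introduces."""
    volume_api = volume.API()

    # Look the volume up once; subsequent calls take the resulting object
    # (a dict-like DB row) instead of a bare volume_id.
    vol = volume_api.get(context, volume_id)

    # Snapshot helpers now take the volume object...
    snap = volume_api.create_snapshot(context, vol, name, description)

    # ...and teardown calls take the volume/snapshot objects as well.
    volume_api.delete_snapshot(context, snap)
    volume_api.delete(context, vol)
```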
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | nova/api/ec2/cloud.py | 51 |
| -rw-r--r-- | nova/api/openstack/compute/contrib/virtual_storage_arrays.py | 7 |
| -rw-r--r-- | nova/api/openstack/compute/contrib/volumes.py | 12 |
| -rw-r--r-- | nova/api/openstack/volume/snapshots.py | 12 |
| -rw-r--r-- | nova/api/openstack/volume/volumes.py | 37 |
| -rw-r--r-- | nova/compute/api.py | 14 |
| -rw-r--r-- | nova/compute/manager.py | 83 |
| -rw-r--r-- | nova/tests/api/ec2/test_cloud.py | 1 |
| -rw-r--r-- | nova/tests/api/openstack/compute/contrib/test_vsa.py | 13 |
| -rw-r--r-- | nova/tests/test_compute.py | 40 |
| -rw-r--r-- | nova/tests/test_quota.py | 12 |
| -rw-r--r-- | nova/tests/test_volume.py | 81 |
| -rw-r--r-- | nova/tests/test_vsa.py | 2 |
| -rw-r--r-- | nova/tests/test_vsa_volumes.py | 13 |
| -rw-r--r-- | nova/volume/api.py | 88 |
| -rw-r--r-- | nova/vsa/api.py | 4 |
| -rw-r--r-- | nova/vsa/manager.py | 2 |
17 files changed, 232 insertions(+), 240 deletions(-)
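
The most visible signature change is to `volume.API.create()`: the `snapshot_id` positional argument is dropped in favor of an optional `snapshot` object. The sketch below shows how a caller might resolve the snapshot first and pass it by keyword, mirroring the new signature in the nova/volume/api.py hunk; the wrapper function and its arguments are illustrative only.

```python
from nova import volume


def create_volume_sketch(context, size, name, description, snapshot_id=None):
    """Sketch: create() after this commit takes an optional snapshot object."""
    volume_api = volume.API()

    # Resolve the snapshot object up front; the old snapshot_id positional
    # argument no longer exists on create().
    snapshot = None
    if snapshot_id is not None:
        snapshot = volume_api.get_snapshot(context, snapshot_id)

    return volume_api.create(context, size, name, description,
                             snapshot=snapshot)
```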
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 646fd42bd..8049d3bd7 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -204,9 +204,8 @@ class CloudController(object): self.image_service = s3.S3ImageService() self.network_api = network.API() self.volume_api = volume.API() - self.compute_api = compute.API( - network_api=self.network_api, - volume_api=self.volume_api) + self.compute_api = compute.API(network_api=self.network_api, + volume_api=self.volume_api) self.setup() def __str__(self): @@ -360,16 +359,18 @@ class CloudController(object): LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) + volume = self.volume_api.get(context, volume_id) snapshot = self.volume_api.create_snapshot( context, - volume_id=volume_id, - name=kwargs.get('display_name'), - description=kwargs.get('display_description')) + volume, + kwargs.get('display_name'), + kwargs.get('display_description')) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_id_to_id(snapshot_id) - self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + self.volume_api.delete_snapshot(context, snapshot) return True def describe_key_pairs(self, context, key_name=None, **kwargs): @@ -857,7 +858,7 @@ class CloudController(object): volumes = [] for ec2_id in volume_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) - volume = self.volume_api.get(context, volume_id=internal_id) + volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) @@ -907,18 +908,18 @@ class CloudController(object): size = kwargs.get('size') if kwargs.get('snapshot_id') is not None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + snapshot = self.volume_api.get_snapshot(context, snapshot_id) LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: - snapshot_id = None + snapshot = None LOG.audit(_("Create volume of %s GB"), size, context=context) - volume = self.volume_api.create( - context, - size=size, - snapshot_id=snapshot_id, - name=kwargs.get('display_name'), - description=kwargs.get('display_description')) + volume = self.volume_api.create(context, + size, + kwargs.get('display_name'), + kwargs.get('display_description'), + snapshot) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
@@ -926,7 +927,8 @@ class CloudController(object): def delete_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) - self.volume_api.delete(context, volume_id=volume_id) + volume = self.volume_api.get(context, volume_id) + self.volume_api.delete(context, volume) return True def update_volume(self, context, volume_id, **kwargs): @@ -937,9 +939,8 @@ class CloudController(object): if field in kwargs: changes[field] = kwargs[field] if changes: - self.volume_api.update(context, - volume_id=volume_id, - fields=changes) + volume = self.volume_api.get(context, volume_id) + self.volume_api.update(context, volume, fields=changes) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): @@ -950,7 +951,7 @@ class CloudController(object): " at %(device)s") % locals() LOG.audit(msg, context=context) self.compute_api.attach_volume(context, instance, volume_id, device) - volume = self.volume_api.get(context, volume_id=volume_id) + volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), @@ -961,7 +962,7 @@ class CloudController(object): def detach_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) - volume = self.volume_api.get(context, volume_id=volume_id) + volume = self.volume_api.get(context, volume_id) instance = self.compute_api.detach_volume(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], @@ -1089,7 +1090,7 @@ class CloudController(object): assert not bdm['virtual_name'] root_device_type = 'ebs' - vol = self.volume_api.get(context, volume_id=volume_id) + vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, @@ -1624,13 +1625,13 @@ class CloudController(object): volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id - vol = self.volume_api.get(context, volume_id=volume_id) + volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now. 
snapshot = self.volume_api.create_snapshot_force( - context, volume_id=volume_id, name=vol['display_name'], - description=vol['display_description']) + context, volume, volume['display_name'], + volume['display_description']) m['snapshot_id'] = snapshot['id'] del m['volume_id'] diff --git a/nova/api/openstack/compute/contrib/virtual_storage_arrays.py b/nova/api/openstack/compute/contrib/virtual_storage_arrays.py index 440be6ad4..10e8a1001 100644 --- a/nova/api/openstack/compute/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/compute/contrib/virtual_storage_arrays.py @@ -278,7 +278,7 @@ class VsaVolumeDriveController(volumes.VolumeController): def _check_volume_ownership(self, context, vsa_id, id): obj = self.object try: - volume_ref = self.volume_api.get(context, volume_id=id) + volume_ref = self.volume_api.get(context, id) except exception.NotFound: LOG.error(_("%(obj)s with ID %(id)s not found"), locals()) raise @@ -333,9 +333,9 @@ class VsaVolumeDriveController(volumes.VolumeController): new_volume = self.volume_api.create(context, size, - None, vol.get('displayName'), vol.get('displayDescription'), + None, volume_type=volume_type, metadata=dict(from_vsa_id=str(vsa_id))) @@ -371,7 +371,8 @@ class VsaVolumeDriveController(volumes.VolumeController): locals(), context=context) try: - self.volume_api.update(context, volume_id=id, fields=changes) + volume = self.volume_api.get(context, id) + self.volume_api.update(context, volume, fields=changes) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py index 972c000ef..4509a4e1f 100644 --- a/nova/api/openstack/compute/contrib/volumes.py +++ b/nova/api/openstack/compute/contrib/volumes.py @@ -145,7 +145,8 @@ class VolumeController(object): LOG.audit(_("Delete volume with id: %s"), id, context=context) try: - self.volume_api.delete(context, volume_id=id) + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) @@ -191,10 +192,17 @@ class VolumeController(object): metadata = vol.get('metadata', None) + snapshot_id = vol.get('snapshot_id'), + + if snapshot_id is not None: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + else: + snapshot = None + new_volume = self.volume_api.create(context, size, - vol.get('snapshot_id'), vol.get('display_name'), vol.get('display_description'), + snapshot=snapshot, volume_type=vol_type, metadata=metadata) diff --git a/nova/api/openstack/volume/snapshots.py b/nova/api/openstack/volume/snapshots.py index d86332ba8..ec12c052b 100644 --- a/nova/api/openstack/volume/snapshots.py +++ b/nova/api/openstack/volume/snapshots.py @@ -82,7 +82,8 @@ class SnapshotsController(object): LOG.audit(_("Delete snapshot with id: %s"), id, context=context) try: - self.volume_api.delete_snapshot(context, snapshot_id=id) + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.delete_snapshot(context, snapshot) except exception.NotFound: return exc.HTTPNotFound() return webob.Response(status_int=202) @@ -113,18 +114,19 @@ class SnapshotsController(object): snapshot = body['snapshot'] volume_id = snapshot['volume_id'] + volume = self.volume_api.get(context, volume_id) force = snapshot.get('force', False) - LOG.audit(_("Create snapshot from volume %s"), volume_id, - context=context) + msg = _("Create snapshot from volume %s") + LOG.audit(msg, 
volume_id, context=context) if force: new_snapshot = self.volume_api.create_snapshot_force(context, - volume_id, + volume, snapshot.get('display_name'), snapshot.get('display_description')) else: new_snapshot = self.volume_api.create_snapshot(context, - volume_id, + volume, snapshot.get('display_name'), snapshot.get('display_description')) diff --git a/nova/api/openstack/volume/volumes.py b/nova/api/openstack/volume/volumes.py index cdecb967a..6783244d6 100644 --- a/nova/api/openstack/volume/volumes.py +++ b/nova/api/openstack/volume/volumes.py @@ -106,7 +106,8 @@ class VolumeController(object): LOG.audit(_("Delete volume with id: %s"), id, context=context) try: - self.volume_api.delete(context, volume_id=id) + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) @@ -135,26 +136,34 @@ class VolumeController(object): if not body: raise exc.HTTPUnprocessableEntity() - vol = body['volume'] - size = vol['size'] + volume = body['volume'] + size = volume['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) - vol_type = vol.get('volume_type', None) - if vol_type: + kwargs = {} + + req_volume_type = volume.get('volume_type', None) + if req_volume_type: try: - vol_type = volume_types.get_volume_type_by_name(context, - vol_type) + kwargs['volume_type'] = volume_types.get_volume_type_by_name( + context, req_volume_type) except exception.NotFound: raise exc.HTTPNotFound() - metadata = vol.get('metadata', None) + kwargs['metadata'] = volume.get('metadata', None) + + snapshot_id = volume.get('snapshot_id') + if snapshot_id is not None: + snapshot = self.volume_api.get_snapshot(context, snapshot_id) + else: + snapshot = None - new_volume = self.volume_api.create(context, size, - vol.get('snapshot_id'), - vol.get('display_name'), - vol.get('display_description'), - volume_type=vol_type, - metadata=metadata) + new_volume = self.volume_api.create(context, + size, + snapshot, + volume.get('display_name'), + volume.get('display_description'), + **kwargs) # Work around problem that instance is lazy-loaded... new_volume = self.volume_api.get(context, new_volume['id']) diff --git a/nova/compute/api.py b/nova/compute/api.py index 0ed562a1b..77a377368 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -111,12 +111,8 @@ class API(base.Base): self.image_service = image_service or \ nova.image.get_default_image_service() - if not network_api: - network_api = network.API() - self.network_api = network_api - if not volume_api: - volume_api = volume.API() - self.volume_api = volume_api + self.network_api = network_api or network.API() + self.volume_api = volume_api or volume.API() super(API, self).__init__(**kwargs) def _check_injected_file_quota(self, context, injected_files): @@ -1644,7 +1640,8 @@ class API(base.Base): if not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. 
" "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id=volume_id) + volume = self.volume_api.get(context, volume_id) + self.volume_api.check_attach(context, volume) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -1661,7 +1658,8 @@ class API(base.Base): check_policy(context, 'detach_volume', instance) - self.volume_api.check_detach(context, volume_id=volume_id) + volume = self.volume_api.get(context, volume_id) + self.volume_api.check_detach(context, volume) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2e24ffbae..cca6d4913 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -301,21 +301,24 @@ class ComputeManager(manager.SchedulerDependentManager): if ((bdm['snapshot_id'] is not None) and (bdm['volume_id'] is None)): # TODO(yamahata): default name and description + snapshot = self.volume_api.get_snapshot(context, + bdm['snapshot_id']) vol = self.volume_api.create(context, bdm['volume_size'], - bdm['snapshot_id'], '', '') + '', '', snapshot) # TODO(yamahata): creating volume simultaneously # reduces creation time? - self.volume_api.wait_creation(context, vol['id']) + self.volume_api.wait_creation(context, vol) self.db.block_device_mapping_update( context, bdm['id'], {'volume_id': vol['id']}) bdm['volume_id'] = vol['id'] if bdm['volume_id'] is not None: - self.volume_api.check_attach(context, - volume_id=bdm['volume_id']) - cinfo = self._attach_volume_boot(context, instance, - bdm['volume_id'], - bdm['device_name']) + volume = self.volume_api.get(context, bdm['volume_id']) + self.volume_api.check_attach(context, volume) + cinfo = self._attach_volume_boot(context, + instance, + volume, + bdm['device_name']) self.db.block_device_mapping_update( context, bdm['id'], {'connection_info': utils.dumps(cinfo)}) @@ -597,10 +600,11 @@ class ComputeManager(manager.SchedulerDependentManager): try: # NOTE(vish): actual driver detach done in driver.destroy, so # just tell nova-volume that we are done with it. + volume = self.volume_api.get(context, bdm['volume_id']) self.volume_api.terminate_connection(context, - bdm['volume_id'], + volume, FLAGS.my_ip) - self.volume_api.detach(context, bdm['volume_id']) + self.volume_api.detach(context, volume) except exception.DiskNotFound as exc: LOG.warn(_("Ignoring DiskNotFound: %s") % exc) @@ -610,7 +614,8 @@ class ComputeManager(manager.SchedulerDependentManager): for bdm in bdms: LOG.debug(_("terminating bdm %s") % bdm) if bdm['volume_id'] and bdm['delete_on_termination']: - self.volume_api.delete(context, bdm['volume_id']) + volume = self.volume_api.get(context, bdm['volume_id']) + self.volume_api.delete(context, volume) # NOTE(vish): bdms will be deleted on instance destroy def _delete_instance(self, context, instance): @@ -1487,21 +1492,22 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get_by_uuid(context, instance_uuid) return self.driver.get_vnc_console(instance_ref) - def _attach_volume_boot(self, context, instance, volume_id, mountpoint): + def _attach_volume_boot(self, context, instance, volume, mountpoint): """Attach a volume to an instance at boot time. 
So actual attach is done by instance creation""" instance_id = instance['id'] instance_uuid = instance['uuid'] + volume_id = volume['id'] context = context.elevated() - LOG.audit(_("instance %(instance_uuid)s: booting with " - "volume %(volume_id)s at %(mountpoint)s") % - locals(), context=context) + msg = _("instance %(instance_uuid)s: booting with " + "volume %(volume_id)s at %(mountpoint)s") + LOG.audit(msg % locals(), context=context) address = FLAGS.my_ip connection_info = self.volume_api.initialize_connection(context, - volume_id, + volume, address) - self.volume_api.attach(context, volume_id, instance_id, mountpoint) + self.volume_api.attach(context, volume, instance_id, mountpoint) return connection_info @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1509,15 +1515,16 @@ class ComputeManager(manager.SchedulerDependentManager): @wrap_instance_fault def attach_volume(self, context, instance_uuid, volume_id, mountpoint): """Attach a volume to an instance.""" + volume = self.volume_api.get(context, volume_id) context = context.elevated() instance_ref = self.db.instance_get_by_uuid(context, instance_uuid) instance_id = instance_ref['id'] - LOG.audit( - _("instance %(instance_uuid)s: attaching volume %(volume_id)s" - " to %(mountpoint)s") % locals(), context=context) + msg = _("instance %(instance_uuid)s: attaching volume %(volume_id)s" + " to %(mountpoint)s") + LOG.audit(msg % locals(), context=context) address = FLAGS.my_ip connection_info = self.volume_api.initialize_connection(context, - volume_id, + volume, address) try: self.driver.attach_volume(connection_info, @@ -1525,13 +1532,14 @@ class ComputeManager(manager.SchedulerDependentManager): mountpoint) except Exception: # pylint: disable=W0702 with utils.save_and_reraise_exception(): - LOG.exception(_("instance %(instance_uuid)s: attach failed" - " %(mountpoint)s, removing") % locals(), - context=context) - self.volume_api.terminate_connection(context, volume_id, + msg = _("instance %(instance_uuid)s: attach failed" + " %(mountpoint)s, removing") + LOG.exception(msg % locals(), context=context) + self.volume_api.terminate_connection(context, + volume, address) - self.volume_api.attach(context, volume_id, instance_id, mountpoint) + self.volume_api.attach(context, volume, instance_id, mountpoint) values = { 'instance_id': instance_id, 'connection_info': utils.dumps(connection_info), @@ -1571,8 +1579,9 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id = instance_ref['id'] bdm = self._get_instance_volume_bdm(context, instance_id, volume_id) self._detach_volume(context, instance_ref, bdm) - self.volume_api.terminate_connection(context, volume_id, FLAGS.my_ip) - self.volume_api.detach(context.elevated(), volume_id) + volume = self.volume_api.get(context, volume_id) + self.volume_api.terminate_connection(context, volume, FLAGS.my_ip) + self.volume_api.detach(context.elevated(), volume) self.db.block_device_mapping_destroy_by_instance_and_volume( context, instance_id, volume_id) return True @@ -1588,11 +1597,9 @@ class ComputeManager(manager.SchedulerDependentManager): bdm = self._get_instance_volume_bdm(context, instance_id, volume_id) - self._detach_volume(context, instance_ref, - bdm['volume_id'], bdm['device_name']) - self.volume_api.terminate_connection(context, - volume_id, - FLAGS.my_ip) + self._detach_volume(context, instance_ref, bdm) + volume = self.volume_api.get(context, volume_id) + self.volume_api.terminate_connection(context, volume, FLAGS.my_ip) except exception.NotFound: 
pass @@ -1863,8 +1870,7 @@ class ComputeManager(manager.SchedulerDependentManager): # Restore volume state for volume_ref in instance_ref['volumes']: - volume_id = volume_ref['id'] - self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) + self.volume_api.update(ctxt, volume_ref, {'status': 'in-use'}) # No instance booting at source host, but instance dir # must be deleted for preparing next block migration @@ -1922,9 +1928,12 @@ class ComputeManager(manager.SchedulerDependentManager): for bdm in self._get_instance_volume_bdms(context, instance_ref['id']): volume_id = bdm['volume_id'] - self.db.volume_update(context, volume_id, {'status': 'in-use'}) - self.volume_api.remove_from_compute(context, instance_ref['id'], - volume_id, dest) + volume = self.volume_api.get(context, volume_id) + self.volume_api.update(context, volume, {'status': 'in-use'}) + self.volume_api.remove_from_compute(context, + volume, + instance_ref['id'], + dest) # Block migration needs empty image at destination host # before migration starts, so if any failure occurs, diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 9af1c14c8..d4e4d7d1a 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -1735,6 +1735,7 @@ class CloudTestCase(test.TestCase): for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id): self.cloud.delete_snapshot(self.context, snapshot_id) + db.volume_destroy(self.context, vol['id']) def test_create_image(self): diff --git a/nova/tests/api/openstack/compute/contrib/test_vsa.py b/nova/tests/api/openstack/compute/contrib/test_vsa.py index e19aeedba..ce5ada2a6 100644 --- a/nova/tests/api/openstack/compute/contrib/test_vsa.py +++ b/nova/tests/api/openstack/compute/contrib/test_vsa.py @@ -251,24 +251,25 @@ def stub_get_vsa_volume_type(self, context): 'extra_specs': {'type': 'vsa_volume'}} -def stub_volume_create(self, context, size, snapshot_id, name, description, +def stub_volume_create(self, context, size, name, description, snapshot, **param): LOG.debug(_("_create: param=%s"), size) vol = _get_default_volume_param() vol['size'] = size vol['display_name'] = name vol['display_description'] = description - vol['snapshot_id'] = snapshot_id + try: + vol['snapshot_id'] = snapshot['id'] + except (KeyError, TypeError): + vol['snapshot_id'] = None return vol -def stub_volume_update(self, context, **param): - LOG.debug(_("_volume_update: param=%s"), param) +def stub_volume_update(self, context, *args, **param): pass -def stub_volume_delete(self, context, **param): - LOG.debug(_("_volume_delete: param=%s"), param) +def stub_volume_delete(self, context, *args, **param): pass diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index bf36e7431..06eaf8ef5 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -2828,42 +2828,6 @@ class ComputeAPITestCase(BaseTestCase): None, '/dev/invalid') - def test_attach_volume(self): - instance_id = 1 - instance_uuid = utils.gen_uuid() - volume_id = 1 - - for device in ('/dev/sda', '/dev/xvda'): - # creating mocks - self.mox.StubOutWithMock(self.compute_api.volume_api, - 'check_attach') - self.mox.StubOutWithMock(self.compute_api, 'get') - self.mox.StubOutWithMock(rpc, 'cast') - - rpc.cast( - mox.IgnoreArg(), - mox.IgnoreArg(), {"method": "attach_volume", - "args": {'volume_id': volume_id, - 'instance_uuid': instance_uuid, - 'mountpoint': device}}) - - self.compute_api.volume_api.check_attach( - mox.IgnoreArg(), - volume_id=volume_id).AndReturn( - {'id': volume_id, 
'status': 'available', - 'attach_status': 'detached'}) - - self.compute_api.get( - mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn({ - 'id': instance_id, - 'uuid': instance_uuid, - 'host': 'fake'}) - - self.mox.ReplayAll() - self.compute_api.attach_volume(None, None, volume_id, device) - self.mox.UnsetStubs() - def test_vnc_console(self): """Make sure we can a vnc console for an instance.""" def vnc_rpc_call_wrapper(*args, **kwargs): @@ -2896,6 +2860,10 @@ class ComputeAPITestCase(BaseTestCase): def fake_check_attach(*args, **kwargs): pass + def fake_volume_get(self, context, volume_id): + return {'id': volume_id} + + self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get) self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach) instance = self._create_fake_instance() diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 83c9e36df..6f2f1161b 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -251,11 +251,7 @@ class QuotaTestCase(test.TestCase): volume_ids.append(volume_id) self.assertRaises(exception.QuotaError, volume.API().create, - self.context, - size=10, - snapshot_id=None, - name='', - description='') + self.context, 10, '', '', None) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) @@ -265,11 +261,7 @@ class QuotaTestCase(test.TestCase): volume_ids.append(volume_id) self.assertRaises(exception.QuotaError, volume.API().create, - self.context, - size=10, - snapshot_id=None, - name='', - description='') + self.context, 10, '', '', None) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 6c81facf8..aeefcd020 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -30,7 +30,7 @@ from nova import log as logging from nova import rpc from nova import test from nova import utils -from nova import volume +import nova.volume.api FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.volume') @@ -62,11 +62,12 @@ class VolumeTestCase(test.TestCase): vol['availability_zone'] = FLAGS.storage_availability_zone vol['status'] = "creating" vol['attach_status'] = "detached" - return db.volume_create(context.get_admin_context(), vol)['id'] + return db.volume_create(context.get_admin_context(), vol) def test_create_delete_volume(self): """Test volume can be created and deleted.""" - volume_id = self._create_volume() + volume = self._create_volume() + volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), volume_id).id) @@ -79,22 +80,24 @@ class VolumeTestCase(test.TestCase): def test_create_volume_from_snapshot(self): """Test volume can be created from a snapshot.""" - volume_src_id = self._create_volume() - self.volume.create_volume(self.context, volume_src_id) - snapshot_id = self._create_snapshot(volume_src_id) - self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) - volume_dst_id = self._create_volume(0, snapshot_id) - self.volume.create_volume(self.context, volume_dst_id, snapshot_id) - self.assertEqual(volume_dst_id, db.volume_get( - context.get_admin_context(), - volume_dst_id).id) + volume_src = self._create_volume() + self.volume.create_volume(self.context, volume_src['id']) + snapshot_id = self._create_snapshot(volume_src['id']) + self.volume.create_snapshot(self.context, volume_src['id'], + snapshot_id) + volume_dst = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, 
volume_dst['id'], snapshot_id) + self.assertEqual(volume_dst['id'], + db.volume_get( + context.get_admin_context(), + volume_dst['id']).id) self.assertEqual(snapshot_id, db.volume_get( context.get_admin_context(), - volume_dst_id).snapshot_id) + volume_dst['id']).snapshot_id) - self.volume.delete_volume(self.context, volume_dst_id) + self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_snapshot(self.context, snapshot_id) - self.volume.delete_volume(self.context, volume_src_id) + self.volume.delete_volume(self.context, volume_src['id']) def test_too_big_volume(self): """Ensure failure if a too large of a volume is requested.""" @@ -102,8 +105,8 @@ class VolumeTestCase(test.TestCase): # volume_create return True try: - volume_id = self._create_volume('1001') - self.volume.create_volume(self.context, volume_id) + volume = self._create_volume('1001') + self.volume.create_volume(self.context, volume) self.fail("Should have thrown TypeError") except TypeError: pass @@ -113,15 +116,15 @@ class VolumeTestCase(test.TestCase): vols = [] total_slots = FLAGS.iscsi_num_targets for _index in xrange(total_slots): - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - vols.append(volume_id) - volume_id = self._create_volume() + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + vols.append(volume['id']) + volume = self._create_volume() self.assertRaises(db.NoMoreTargets, self.volume.create_volume, self.context, - volume_id) - db.volume_destroy(context.get_admin_context(), volume_id) + volume['id']) + db.volume_destroy(context.get_admin_context(), volume['id']) for volume_id in vols: self.volume.delete_volume(self.context, volume_id) @@ -137,7 +140,8 @@ class VolumeTestCase(test.TestCase): inst['ami_launch_index'] = 0 instance_id = db.instance_create(self.context, inst)['id'] mountpoint = "/dev/sdf" - volume_id = self._create_volume() + volume = self._create_volume() + volume_id = volume['id'] self.volume.create_volume(self.context, volume_id) if FLAGS.fake_tests: db.volume_attached(self.context, volume_id, instance_id, @@ -190,8 +194,8 @@ class VolumeTestCase(test.TestCase): LOG.debug(_("Target %s allocated"), iscsi_target) total_slots = FLAGS.iscsi_num_targets for _index in xrange(total_slots): - volume_id = self._create_volume() - d = self.volume.create_volume(self.context, volume_id) + volume = self._create_volume() + d = self.volume.create_volume(self.context, volume['id']) _check(d) for volume_id in volume_ids: self.volume.delete_volume(self.context, volume_id) @@ -215,10 +219,10 @@ class VolumeTestCase(test.TestCase): def test_create_delete_snapshot(self): """Test snapshot can be created and deleted.""" - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - snapshot_id = self._create_snapshot(volume_id) - self.volume.create_snapshot(self.context, volume_id, snapshot_id) + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + snapshot_id = self._create_snapshot(volume['id']) + self.volume.create_snapshot(self.context, volume['id'], snapshot_id) self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), snapshot_id).id) @@ -228,7 +232,7 @@ class VolumeTestCase(test.TestCase): db.snapshot_get, self.context, snapshot_id) - self.volume.delete_volume(self.context, volume_id) + self.volume.delete_volume(self.context, volume['id']) def test_create_snapshot_force(self): """Test snapshot in use can be created forcibly.""" @@ 
-237,22 +241,23 @@ class VolumeTestCase(test.TestCase): pass self.stubs.Set(rpc, 'cast', fake_cast) - volume_id = self._create_volume() - self.volume.create_volume(self.context, volume_id) - db.volume_attached(self.context, volume_id, self.instance_id, + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + db.volume_attached(self.context, volume['id'], self.instance_id, '/dev/sda1') - volume_api = volume.api.API() + volume_api = nova.volume.api.API() + volume = volume_api.get(self.context, volume['id']) self.assertRaises(exception.ApiError, volume_api.create_snapshot, - self.context, volume_id, + self.context, volume, 'fake_name', 'fake_description') snapshot_ref = volume_api.create_snapshot_force(self.context, - volume_id, + volume, 'fake_name', 'fake_description') db.snapshot_destroy(self.context, snapshot_ref['id']) - db.volume_destroy(self.context, volume_id) + db.volume_destroy(self.context, volume['id']) class DriverTestCase(test.TestCase): diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index d7c8f09f5..132874616 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -24,7 +24,6 @@ from nova import flags from nova import log as logging from nova import test from nova import vsa -from nova import volume from nova.volume import volume_types from nova.vsa import utils as vsa_utils @@ -40,7 +39,6 @@ class VsaTestCase(test.TestCase): super(VsaTestCase, self).setUp() self.stubs = stubout.StubOutForTesting() self.vsa_api = vsa.API() - self.volume_api = volume.API() FLAGS.quota_volumes = 100 FLAGS.quota_gigabytes = 10000 diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index ef6396ff4..7840dd481 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -56,7 +56,7 @@ class VsaVolumesTestCase(test.TestCase): def _default_volume_param(self): return { 'size': 1, - 'snapshot_id': None, + 'snapshot': None, 'name': 'Test volume name', 'description': 'Test volume desc name', 'volume_type': self.default_vol_type, @@ -95,8 +95,10 @@ class VsaVolumesTestCase(test.TestCase): 'creating') self.volume_api.update(self.context, - volume_ref['id'], {'status': 'available'}) - self.volume_api.delete(self.context, volume_ref['id']) + volume_ref, + {'status': 'available'}) + volume_ref = self.volume_api.get(self.context, volume_ref['id']) + self.volume_api.delete(self.context, volume_ref) vols3 = self._get_all_volumes_by_vsa() self.assertEqual(1, len(vols2)) @@ -110,10 +112,11 @@ class VsaVolumesTestCase(test.TestCase): volume_ref = self.volume_api.create(self.context, **volume_param) self.volume_api.update(self.context, - volume_ref['id'], {'status': 'in-use'}) + volume_ref, + {'status': 'in-use'}) self.assertRaises(exception.ApiError, self.volume_api.delete, - self.context, volume_ref['id']) + self.context, volume_ref) def test_vsa_volume_delete_vsa_with_volumes(self): """ Check volume deleton in different states. 
""" diff --git a/nova/volume/api.py b/nova/volume/api.py index 1cfe8a3b6..a41e01274 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -40,16 +40,19 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, snapshot_id, name, description, + def create(self, context, size, name, description, snapshot=None, volume_type=None, metadata=None, availability_zone=None): - if snapshot_id is not None: - snapshot = self.get_snapshot(context, snapshot_id) + if snapshot is not None: if snapshot['status'] != "available": raise exception.ApiError( _("Snapshot status must be available")) if not size: size = snapshot['volume_size'] + snapshot_id = snapshot['id'] + else: + snapshot_id = None + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeded for %(pid)s, tried to create" @@ -89,15 +92,16 @@ class API(base.Base): return volume # TODO(yamahata): eliminate dumb polling - def wait_creation(self, context, volume_id): + def wait_creation(self, context, volume): + volume_id = volume['id'] while True: volume = self.get(context, volume_id) if volume['status'] != 'creating': return greenthread.sleep(1) - def delete(self, context, volume_id): - volume = self.get(context, volume_id) + def delete(self, context, volume): + volume_id = volume['id'] if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) now = utils.utcnow() @@ -109,8 +113,8 @@ class API(base.Base): {"method": "delete_volume", "args": {"volume_id": volume_id}}) - def update(self, context, volume_id, fields): - self.db.volume_update(context, volume_id, fields) + def update(self, context, volume, fields): + self.db.volume_update(context, volume['id'], fields) def get(self, context, volume_id): rv = self.db.volume_get(context, volume_id) @@ -165,72 +169,65 @@ class API(base.Base): return self.db.snapshot_get_all(context) return self.db.snapshot_get_all_by_project(context, context.project_id) - def check_attach(self, context, volume_id): - volume = self.get(context, volume_id) + def check_attach(self, context, volume): # TODO(vish): abstract status checking? if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) if volume['attach_status'] == "attached": raise exception.ApiError(_("Volume is already attached")) - def check_detach(self, context, volume_id): - volume = self.get(context, volume_id) + def check_detach(self, context, volume): # TODO(vish): abstract status checking? 
if volume['status'] == "available": raise exception.ApiError(_("Volume is already detached")) - def remove_from_compute(self, context, instance_id, volume_id, host): + def remove_from_compute(self, context, volume, instance_id, host): """Remove volume from specified compute host.""" rpc.call(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "remove_volume_connection", "args": {'instance_id': instance_id, - 'volume_id': volume_id}}) + 'volume_id': volume['id']}}) - def attach(self, context, volume_id, instance_id, mountpoint): - volume = self.get(context, volume_id) + def attach(self, context, volume, instance_id, mountpoint): host = volume['host'] queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "attach_volume", - "args": {"volume_id": volume_id, + "args": {"volume_id": volume['id'], "instance_id": instance_id, "mountpoint": mountpoint}}) - def detach(self, context, volume_id): - volume = self.get(context, volume_id) + def detach(self, context, volume): host = volume['host'] queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "detach_volume", - "args": {"volume_id": volume_id}}) + "args": {"volume_id": volume['id']}}) - def initialize_connection(self, context, volume_id, address): - volume = self.get(context, volume_id) + def initialize_connection(self, context, volume, address): host = volume['host'] queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "initialize_connection", - "args": {"volume_id": volume_id, + "args": {"volume_id": volume['id'], "address": address}}) - def terminate_connection(self, context, volume_id, address): - volume = self.get(context, volume_id) + def terminate_connection(self, context, volume, address): host = volume['host'] queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) return rpc.call(context, queue, {"method": "terminate_connection", - "args": {"volume_id": volume_id, + "args": {"volume_id": volume['id'], "address": address}}) - def _create_snapshot(self, context, volume_id, name, description, + def _create_snapshot(self, context, volume, name, description, force=False): - volume = self.get(context, volume_id) if ((not force) and (volume['status'] != "available")): raise exception.ApiError(_("Volume status must be available")) options = { - 'volume_id': volume_id, + 'volume_id': volume['id'], 'user_id': context.user_id, 'project_id': context.project_id, 'status': "creating", @@ -244,40 +241,39 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_snapshot", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume_id, + "volume_id": volume['id'], "snapshot_id": snapshot['id']}}) return snapshot - def create_snapshot(self, context, volume_id, name, description): - return self._create_snapshot(context, volume_id, name, description, + def create_snapshot(self, context, volume, name, description): + return self._create_snapshot(context, volume, name, description, False) - def create_snapshot_force(self, context, volume_id, name, description): - return self._create_snapshot(context, volume_id, name, description, + def create_snapshot_force(self, context, volume, name, description): + return self._create_snapshot(context, volume, name, description, True) - def delete_snapshot(self, context, snapshot_id): - snapshot = self.get_snapshot(context, snapshot_id) + def delete_snapshot(self, context, snapshot): if snapshot['status'] != 
"available": raise exception.ApiError(_("Snapshot status must be available")) - self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'}) + self.db.snapshot_update(context, snapshot['id'], + {'status': 'deleting'}) rpc.cast(context, FLAGS.scheduler_topic, {"method": "delete_snapshot", "args": {"topic": FLAGS.volume_topic, - "snapshot_id": snapshot_id}}) + "snapshot_id": snapshot['id']}}) - def get_volume_metadata(self, context, volume_id): + def get_volume_metadata(self, context, volume): """Get all metadata associated with a volume.""" - rv = self.db.volume_metadata_get(context, volume_id) + rv = self.db.volume_metadata_get(context, volume['id']) return dict(rv.iteritems()) - def delete_volume_metadata(self, context, volume_id, key): + def delete_volume_metadata(self, context, volume, key): """Delete the given metadata item from an volume.""" - self.db.volume_metadata_delete(context, volume_id, key) + self.db.volume_metadata_delete(context, volume['id'], key) - def update_volume_metadata(self, context, volume_id, - metadata, delete=False): + def update_volume_metadata(self, context, volume, metadata, delete=False): """Updates or creates volume metadata. If delete is True, metadata items that are not specified in the @@ -287,10 +283,10 @@ class API(base.Base): if delete: _metadata = metadata else: - _metadata = self.get_volume_metadata(context, volume_id) + _metadata = self.get_volume_metadata(context, volume['id']) _metadata.update(metadata) - self.db.volume_metadata_update(context, volume_id, _metadata, True) + self.db.volume_metadata_update(context, volume['id'], _metadata, True) return _metadata def get_volume_metadata_value(self, volume, key): diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 9f17b6e4e..27baff151 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -246,9 +246,9 @@ class API(base.Base): vol_ref = self.volume_api.create(context, vol_size, - None, vol_name, vol['description'], + None, volume_type=vol_type, metadata=dict(to_vsa_id=str(vsa_id)), availability_zone=availability_zone) @@ -349,7 +349,7 @@ class API(base.Base): vol_name = volume['name'] LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\ "volume %(vol_name)s"), locals()) - self.volume_api.delete(context, volume['id']) + self.volume_api.delete(context, volume) except exception.ApiError: LOG.info(_("Unable to delete volume %s"), volume['name']) if force_delete: diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index d4c414106..850d99581 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -136,7 +136,7 @@ class VsaManager(manager.SchedulerDependentManager): locals()) if status == 'available': try: - # self.volume_api.update(context, volume['id'], + # self.volume_api.update(context, volume, # dict(attach_status="attached")) pass except Exception as ex: |
