From dcda6be23c3797872c406f58578b05befd378c97 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:48:26 +0900 Subject: Add support for creating a snapshot of a nova volume with euca-create-snapshot. --- nova/api/ec2/__init__.py | 6 ++ nova/api/ec2/cloud.py | 52 ++++++++++++--- nova/db/api.py | 39 +++++++++++ nova/db/sqlalchemy/api.py | 77 ++++++++++++++++++++++ .../versions/015_add_volume_snapshot_support.py | 71 ++++++++++++++++++++ nova/db/sqlalchemy/models.py | 24 +++++++ nova/exception.py | 6 ++ nova/volume/api.py | 44 +++++++++++++ nova/volume/driver.py | 8 +++ nova/volume/manager.py | 42 ++++++++++++ 10 files changed, 361 insertions(+), 8 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a3c3b25a1..a89d65a38 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -331,6 +331,12 @@ class Executor(wsgi.Application): ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x') message = _('Volume %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) + except exception.SnapshotNotFound as ex: + LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), + context=context) + ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x') + message = _('Snapshot %s not found') % ec2_id + return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index bd4c9dcd4..6daf299b9 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -283,14 +283,46 @@ class CloudController(object): owner=None, restorable_by=None, **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} + if snapshot_id: + snapshots = [] + for ec2_id in snapshot_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshots.append(snapshot) + else: + snapshots = self.volume_api.get_all_snapshots(context) + snapshots = [self._format_snapshot(context, s) for s in snapshots] + return {'snapshotSet': snapshots} + + def _format_snapshot(self, context, snapshot): + s = {} + s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['status'] = snapshot['status'] + s['startTime'] = snapshot['created_at'] + s['progress'] = snapshot['progress'] + s['ownerId'] = snapshot['project_id'] + s['volumeSize'] = snapshot['volume_size'] + s['description'] = snapshot['display_description'] + + s['display_name'] = snapshot['display_name'] + s['display_description'] = snapshot['display_description'] + return s + + def create_snapshot(self, context, volume_id, **kwargs): + LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + volume_id = ec2utils.ec2_id_to_id(volume_id) + snapshot = self.volume_api.create_snapshot( + context, + volume_id=volume_id, + name=kwargs.get('display_name'), + description=kwargs.get('display_description')) + return {'snapshotSet': [self._format_snapshot(context, snapshot)]} + + def delete_snapshot(self, context, snapshot_id, **kwargs): + snapshot_id = 
ec2utils.ec2_id_to_id(snapshot_id) + self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id) + return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = db.key_pair_get_all_by_user(context, context.user_id) @@ -598,6 +630,10 @@ class CloudController(object): 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] + if volume.get('snapshot_id') != None: + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + else: + v['snapshotId'] = None v['display_name'] = volume['display_name'] v['display_description'] = volume['display_description'] diff --git a/nova/db/api.py b/nova/db/api.py index 63901e94d..9fc4b8c0a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -46,6 +46,8 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') +flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', + 'Template string to be used to generate instance names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -867,6 +869,43 @@ def volume_update(context, volume_id, values): #################### +def snapshot_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Create a volume from the values dictionary.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a volume or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all volumes.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on an snapshot and update it. + + Raises NotFound if snapshot does not exist. 
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 646675a45..059a22cb9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1760,6 +1760,83 @@ def volume_update(context, volume_id, values): ################### +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.Snapshot).\ + filter_by(project_id=context.project_id).\ + filter_by(id=snapshot_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.SnapshotNotFound(_('Snapshot %s not found') % snapshot_id, + snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + session = get_session() + return session.query(models.Snapshot).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Snapshot).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + @require_context def security_group_get_all(context): session = get_session() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py new file mode 100644 index 000000000..288f63e72 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_volume_snapshot_support.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import * +from migrate import * + +from nova import log as logging + +meta = MetaData() + +snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + snapshots.create() + except Exception: + logging.info(repr(snapshots)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ snapshots.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f79d0f16c..9abe4d9ae 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -327,6 +327,30 @@ class Quota(BASE, NovaBase): metadata_items = Column(Integer) +class Snapshot(BASE, NovaBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(Integer) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/exception.py b/nova/exception.py index 4e2bbdbaf..7adc3d007 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -68,6 +68,12 @@ class VolumeNotFound(NotFound): super(VolumeNotFound, self).__init__(message) +class SnapshotNotFound(NotFound): + def __init__(self, message, snapshot_id): + self.snapshot_id = snapshot_id + super(SnapshotNotFound, self).__init__(message) + + class Duplicate(Error): pass diff --git a/nova/volume/api.py b/nova/volume/api.py index 4b4bb9dc5..f5285f31f 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -90,6 +90,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_snapshot(self, context, snapshot_id): + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context): + if context.is_admin: + return self.db.snapshot_get_all(context) + return self.db.snapshot_get_all_by_project(context, context.project_id) + def check_attach(self, context, volume_id): volume = self.get(context, volume_id) # TODO(vish): abstract status checking? @@ -103,3 +112,38 @@ class API(base.Base): # TODO(vish): abstract status checking? 
if volume['status'] == "available": raise exception.ApiError(_("Volume is already detached")) + + def create_snapshot(self, context, volume_id, name, description): + volume = self.get(context, volume_id) + if volume['status'] != "available": + raise exception.ApiError(_("Volume status must be available")) + + options = { + 'volume_id': volume_id, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description} + + snapshot = self.db.snapshot_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_snapshot", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume_id, + "snapshot_id": snapshot['id']}}) + return snapshot + + def delete_snapshot(self, context, snapshot_id): + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError(_("Snapshot status must be available")) + self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'}) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "delete_snapshot", + "args": {"topic": FLAGS.volume_topic, + "snapshot_id": snapshot_id}}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 55307ad9b..31998e307 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -122,6 +122,14 @@ class VolumeDriver(object): (FLAGS.volume_group, volume['name'])) + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + raise NotImplementedError() + def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 2178389ce..87fd3bf17 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -152,6 +152,48 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], model_update) + + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'error'}) + raise + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) + LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name']) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except Exception: + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + raise + + self.db.snapshot_destroy(context, snapshot_id) + LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + return True + def setup_compute_volume(self, context, volume_id): """Setup remote 
volume on compute host. -- cgit From aad857a18153792d96f300732c3bb5bb16aa02c3 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Tue, 19 Apr 2011 20:50:10 +0900 Subject: Add support for creating a Sheepdog snapshot. --- nova/volume/driver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 31998e307..ba0a7efef 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -620,6 +620,16 @@ class SheepdogDriver(VolumeDriver): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) + def create_snapshot(self, snapshot): + """Creates a sheepdog snapshot""" + self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], + "sheepdog:%s" % snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes a sheepdog snapshot""" + self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], + '-s', snapshot['name']) + def local_path(self, volume): return "sheepdog:%s" % volume['name'] -- cgit From 321d724df5f0c4ca008da2a08ef279ca18df0733 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 13 May 2011 23:07:34 +0900 Subject: Authors: add myself to Authers file add myself to Authers file for later commit. --- Authors | 1 + 1 file changed, 1 insertion(+) diff --git a/Authors b/Authors index 8b54240c1..baf8fde4c 100644 --- a/Authors +++ b/Authors @@ -28,6 +28,7 @@ Gabe Westmaas Hisaharu Ishii Hisaki Ohara Ilya Alekseyev +Isaku Yamahata Jason Koelker Jay Pipes Jesse Andrews -- cgit From bbbea57cf6ab28c3ad1081041275e0d6d2bbd308 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 13 May 2011 23:08:57 +0900 Subject: volume/driver: factor out lvm opration Factor out lvm operation for implementing basic snapshot later. --- nova/volume/driver.py | 62 ++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ba0a7efef..ec7be37bf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -90,37 +90,47 @@ class VolumeDriver(object): raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) - def create_volume(self, volume): - """Creates a logical volume. 
Can optionally return a Dictionary of - changes to the volume object to be persisted.""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] + def _create_volume(self, volume_name, sizestr): self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n', - volume['name'], - FLAGS.volume_group) - - def delete_volume(self, volume): - """Deletes a logical volume.""" + volume_name, FLAGS.volume_group) + + def _copy_volume(self, srcstr, deststr, size_in_g): + self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M') + + def _volume_not_present(self, volume_name): + path_name = '%s/%s' % (FLAGS.volume_group, volume_name) try: - self._try_execute('sudo', 'lvdisplay', - '%s/%s' % - (FLAGS.volume_group, - volume['name'])) + self._try_execute('sudo', 'lvdisplay', path_name) except Exception as e: - # If the volume isn't present, then don't attempt to delete + # If the volume isn't present return True + return False + def _delete_volume(self, volume, size_in_g): + """Deletes a logical volume.""" # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority - self._execute('sudo', 'dd', 'if=/dev/zero', - 'of=%s' % self.local_path(volume), - 'count=%d' % (volume['size'] * 1024), - 'bs=1M') + self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, - volume['name'])) + (FLAGS.volume_group, volume['name'])) + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + + def create_volume(self, volume): + """Creates a logical volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" + self._create_volume(volume['name'], self._sizestr(volume['size'])) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + if self._volume_not_present(volume['name']): + # If the volume isn't present, then don't attempt to delete + return True + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" @@ -608,13 +618,9 @@ class SheepdogDriver(VolumeDriver): def create_volume(self, volume): """Creates a sheepdog volume""" - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] self._try_execute('qemu-img', 'create', "sheepdog:%s" % volume['name'], - sizestr) + self._sizestr(volume['size'])) def delete_volume(self, volume): """Deletes a logical volume""" -- cgit From 4f7cfba4a00f04b7c30c61da2946f183241a7c7f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 13 May 2011 23:27:35 +0900 Subject: volume/driver: implement basic snapshot added basic support for snapshot to VolumeDriver base class. The implementation is not effective, but works. The effective implementation should be done by drived driver class. 
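For reference, the LVM sequence these new methods wrap is roughly the
standalone sketch below. This is an illustration only, not part of the
change; the 'nova-volumes' group and the volume/snapshot names are
assumptions for the example, and it needs a host where the LVM tools can
be run via sudo.

    # Standalone sketch of the underlying LVM behaviour (assumed names).
    import subprocess

    VG = 'nova-volumes'          # assumed volume group name

    def run(*cmd):
        # rough stand-in for the driver's _try_execute helper
        subprocess.check_call(cmd)

    def escape(name):
        # LVM reserves names starting with 'snapshot', so mangle them
        return '_' + name if name.startswith('snapshot') else name

    # take a snapshot of an existing logical volume
    run('sudo', 'lvcreate', '-L', '1G',
        '--name', escape('snapshot-00000001'),
        '--snapshot', '%s/volume-00000001' % VG)

    # while the snapshot exists the origin volume reports 'o' (or 'O')
    # in its Attr column, which is what delete_volume now checks for
    attr = subprocess.check_output(
        ['sudo', 'lvdisplay', '--noheading', '-C', '-o', 'Attr',
         '%s/volume-00000001' % VG])
    print(attr.decode().strip())

    # drop the snapshot again
    run('sudo', 'lvremove', '-f',
        '%s/%s' % (VG, escape('snapshot-00000001')))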
--- nova/exception.py | 4 ++++ nova/volume/driver.py | 37 +++++++++++++++++++++++++++++++++---- nova/volume/manager.py | 6 ++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 39620ccc1..bd04435ed 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -271,6 +271,10 @@ class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") +class VolumeIsBusy(Error): + message = _("deleting volume %(volume_name)s that has snapshot") + + class ExportDeviceNotFoundForVolume(NotFound): message = _("No export device found for volume %(volume_id)s.") diff --git a/nova/volume/driver.py b/nova/volume/driver.py index ec7be37bf..a6cf2cb46 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -113,13 +113,21 @@ class VolumeDriver(object): # TODO(ja): reclaiming space should be done lazy and low priority self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % - (FLAGS.volume_group, volume['name'])) + (FLAGS.volume_group, + self._escape_snapshot(volume['name']))) def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' return '%sG' % size_in_g + # Linux LVM reserves name that starts with snapshot, so that + # such volume name can't be created. Mangle it. + def _escape_snapshot(self, snapshot_name): + if not snapshot_name.startswith('snapshot'): + return snapshot_name + return '_' + snapshot_name + def create_volume(self, volume): """Creates a logical volume. Can optionally return a Dictionary of changes to the volume object to be persisted.""" @@ -130,20 +138,41 @@ class VolumeDriver(object): if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True + + # TODO(yamahata): lvm can't delete origin volume only without + # deleting derived snapshots. Can we do something fancy? + out, err = self._execute('sudo', 'lvdisplay', '--noheading', + '-C', '-o', 'Attr', + '%s/%s' % (FLAGS.volume_group, + volume['name'])) + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy(volume_name=volume['name']) + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): """Creates a snapshot.""" - raise NotImplementedError() + orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name']) + self._try_execute('sudo', 'lvcreate', '-L', + self._sizestr(snapshot['volume_size']), + '--name', self._escape_snapshot(snapshot['name']), + '--snapshot', orig_lv_name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" - raise NotImplementedError() + if self._volume_not_present(self._escape_snapshot(snapshot['name'])): + # If the snapshot isn't present, then don't attempt to delete + return True + + # TODO(yamahata): zeroing out the whole snapshot triggers COW. + # it's quite slow. 
+ self._delete_volume(snapshot, snapshot['volume_size']) def local_path(self, volume): # NOTE(vish): stops deprecation warning escaped_group = FLAGS.volume_group.replace('-', '--') - escaped_name = volume['name'].replace('-', '--') + escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def ensure_export(self, context, volume): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 87fd3bf17..fd889633d 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -142,6 +142,12 @@ class VolumeManager(manager.SchedulerDependentManager): self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy, e: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True except Exception: self.db.volume_update(context, volume_ref['id'], -- cgit From aaec8400be701c674bbf89badd59ee9468827ed9 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Sat, 14 May 2011 01:42:26 +0900 Subject: volume/driver: make unit test, test_volume, pass fake command executer doesn't return command result. Which return None instead of string. So add None check to make unit test pass. --- nova/volume/driver.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index a6cf2cb46..0807ff476 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -145,9 +145,11 @@ class VolumeDriver(object): '-C', '-o', 'Attr', '%s/%s' % (FLAGS.volume_group, volume['name'])) - out = out.strip() - if (out[0] == 'o') or (out[0] == 'O'): - raise exception.VolumeIsBusy(volume_name=volume['name']) + # fake_execute returns None resulting unit test error + if out: + out = out.strip() + if (out[0] == 'o') or (out[0] == 'O'): + raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume, volume['size']) -- cgit From 8b86fb3a4d9ee3e328232c0051b9daff6838d00d Mon Sep 17 00:00:00 2001 From: Josh Durgin Date: Fri, 13 May 2011 10:26:13 -0700 Subject: Add support for rbd snapshots. --- nova/volume/driver.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 0807ff476..e0e18b9bf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -608,6 +608,18 @@ class RBDDriver(VolumeDriver): self._try_execute('rbd', '--pool', FLAGS.rbd_pool, 'rm', volume['name']) + def create_snapshot(self, snapshot): + """Creates an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'create', '--snap', snapshot['name'], + snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'rm', '--snap', snapshot['name'], + snapshot['volume_name']) + def local_path(self, volume): """Returns the path of the rbd volume.""" # This is the same as the remote path -- cgit From 5eb57c6191ac7c8d98539eb3967ceb00f7c55daf Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Mon, 16 May 2011 16:29:21 +0900 Subject: Add a unit test for snapshot_volume. 
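The case drives the volume manager directly. For manual verification the
same flow can be reproduced from a python shell with something like the
sketch below; it assumes a configured nova environment (database, flags)
and an existing volume with id 1, so the ids here are placeholders.

    # Sketch of what the new test automates (placeholder ids).
    from nova import context
    from nova import db
    from nova import flags
    from nova import utils

    ctxt = context.get_admin_context()
    volume_mgr = utils.import_object(flags.FLAGS.volume_manager)

    snap_id = db.snapshot_create(ctxt, {'volume_id': 1,
                                        'user_id': 'fake',
                                        'project_id': 'fake',
                                        'volume_size': 1,
                                        'status': 'creating'})['id']
    volume_mgr.create_snapshot(ctxt, 1, snap_id)
    print(db.snapshot_get(ctxt, snap_id)['status'])   # expect 'available'
    volume_mgr.delete_snapshot(ctxt, snap_id)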
--- nova/tests/test_volume.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 236d12434..c66b66959 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -176,6 +176,33 @@ class VolumeTestCase(test.TestCase): # This will allow us to test cross-node interactions pass + @staticmethod + def _create_snapshot(volume_id, size='0'): + """Create a snapshot object.""" + snap = {} + snap['volume_size'] = size + snap['user_id'] = 'fake' + snap['project_id'] = 'fake' + snap['volume_id'] = volume_id + snap['status'] = "creating" + return db.snapshot_create(context.get_admin_context(), snap)['id'] + + def test_create_delete_snapshot(self): + """Test snapshot can be created and deleted.""" + volume_id = self._create_volume() + self.volume.create_volume(self.context, volume_id) + snapshot_id = self._create_snapshot(volume_id) + self.volume.create_snapshot(self.context, volume_id, snapshot_id) + self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + + self.volume.delete_snapshot(self.context, snapshot_id) + self.assertRaises(exception.NotFound, + db.snapshot_get, + self.context, + snapshot_id) + self.volume.delete_volume(self.context, volume_id) + class DriverTestCase(test.TestCase): """Base Test class for Drivers.""" -- cgit From 0b698186b56af6580633dedd7916df2897945f29 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 21:31:14 +0900 Subject: Avoid wildcard import. --- .../migrate_repo/versions/019_add_volume_snapshot_support.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py index 288f63e72..5a44bac16 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -15,8 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * +from sqlalchemy import Column, Table, MetaData +from sqlalchemy import Integer, DateTime, Boolean, String from nova import log as logging -- cgit From a4cc51b78ae5e08227bef7a4be52953776a3e947 Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 21:49:15 +0900 Subject: Add a unitest to test EC2 snapshot APIs. 
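The new cases cover the code paths behind euca-create-snapshot,
euca-describe-snapshots and euca-delete-snapshot. A rough interactive
equivalent of what they exercise is sketched below; the admin context,
the 'vol-00000001' id and an 'available' volume behind it are
assumptions, and the snapshot itself only becomes 'available' (and hence
deletable) once the volume service has handled the creation cast.

    # Sketch of the EC2-layer calls exercised here (assumed ids/context).
    from nova import context
    from nova.api.ec2 import cloud

    ctxt = context.get_admin_context()
    controller = cloud.CloudController()

    # euca-create-snapshot vol-00000001
    controller.create_snapshot(ctxt, volume_id='vol-00000001')

    # euca-describe-snapshots
    snaps = controller.describe_snapshots(ctxt)['snapshotSet']
    print(snaps)

    # euca-delete-snapshot (once the snapshot is 'available')
    controller.delete_snapshot(ctxt, snapshot_id=snaps[0]['snapshotId'])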
--- nova/tests/test_cloud.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index c8559615a..d9169a646 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -188,6 +188,52 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, service1['id']) db.service_destroy(self.context, service2['id']) + def test_describe_snapshots(self): + """Makes sure describe_snapshots works and filters results.""" + vol = db.volume_create(self.context, {}) + snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']}) + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 2) + snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x') + result = self.cloud.describe_snapshots(self.context, + snapshot_id=[snapshot_id]) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual( + ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']), + snap2['id']) + db.snapshot_destroy(self.context, snap1['id']) + db.snapshot_destroy(self.context, snap2['id']) + db.volume_destroy(self.context, vol['id']) + + def test_create_snapshot(self): + """Makes sure create_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + + result = self.cloud.create_snapshot(self.context, + volume_id=volume_id) + snapshot_id = result['snapshotId'] + result = self.cloud.describe_snapshots(self.context) + self.assertEqual(len(result['snapshotSet']), 1) + self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id) + + db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id)) + db.volume_destroy(self.context, vol['id']) + + def test_delete_snapshot(self): + """Makes sure delete_snapshot works.""" + vol = db.volume_create(self.context, {'status': "available"}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.delete_snapshot(self.context, + snapshot_id=snapshot_id) + self.assertTrue(result) + + db.volume_destroy(self.context, vol['id']) + def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', -- cgit From c04a59fefbcbd0e5e21cbc8c70eb3147785cf22d Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 19 May 2011 22:06:18 +0900 Subject: Fix comments. 
--- nova/db/api.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index 1ef82b461..3597732b9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -884,27 +884,27 @@ def volume_update(context, volume_id, values): def snapshot_create(context, values): - """Create a volume from the values dictionary.""" + """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): - """Create a volume from the values dictionary.""" + """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): - """Get a volume or raise if it does not exist.""" + """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): - """Get all volumes.""" + """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): - """Get all volumes belonging to a project.""" + """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) -- cgit From fdd27860724cd57db6df059a97e98289f88ce6ac Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add support to rpc for multicall --- nova/rpc.py | 99 +++++++++++++++++++++++++++++++++++++------------- nova/tests/test_rpc.py | 17 +++++++++ 2 files changed, 90 insertions(+), 26 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 2116f22c3..04198a4a6 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -32,8 +32,11 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import queue + from nova import context from nova import exception @@ -131,7 +134,8 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -347,8 +351,9 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) LOG.debug(_('unpacked context: %s'), context_dict) - return context.RequestContext.from_dict(context_dict) + return RpcContext.from_dict(context_dict) def _pack_context(msg, context): @@ -365,26 +370,27 @@ def _pack_context(msg, context): msg.update(context) -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - class WaitMessage(object): - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self.result = 
RemoteError(*data['failure']) - else: - self.result = data['result'] - - wait_msg = WaitMessage() conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) conn = Connection.instance() @@ -392,18 +398,59 @@ def call(context, topic, msg): publisher.send(msg) publisher.close() - try: - consumer.wait(limit=1) - except StopIteration: - pass - consumer.close() - # NOTE(termie): this is a little bit of a change from the original - # non-eventlet code where returning a Failure - # instance from a deferred call is very similar to - # raising an exception - if isinstance(wait_msg.result, Exception): - raise wait_msg.result - return wait_msg.result + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer + self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + # TODO(termie): This is probably really a much simpler issue but am + # trying to solve the problem quickly. This works but + # I'd prefer to dig in and do it the best way later on. + + def _waiter(): + while not self._closed: + try: + self._consumer.wait(limit=1) + except StopIteration: + pass + eventlet.spawn(_waiter) + + while True: + result = self._results.get() + if isinstance(result, Exception): + raise result + if result == None: + self.close() + raise StopIteration + yield result + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + for x in rv: + rv.close() + return x def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 44d7c91eb..92ddfcffc 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,17 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_multicall_succeed_three_times(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + + for x in result: + self.assertEqual(value, x) + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -126,6 +137,12 @@ class TestReceiver(object): LOG.debug(_("Received %s"), context) return context.to_dict() + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value) + context.reply(value) + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" -- cgit From d46c9fffe4fab8f55483c73d3e6ef12116de9bc5 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: make the test more expicit --- nova/tests/test_rpc.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 92ddfcffc..acab3e758 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -56,9 +56,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - + i = 0 for x in result: - self.assertEqual(value, x) + self.assertEqual(value + i, x) + i += 1 def 
test_context_passed(self): """Makes sure a context is passed through rpc call""" @@ -140,8 +141,8 @@ class TestReceiver(object): @staticmethod def echo_three_times(context, value): context.reply(value) - context.reply(value) - context.reply(value) + context.reply(value + 1) + context.reply(value + 2) @staticmethod def fail(context, value): -- cgit From 7622e854ef68fbdbfc531690cf74916301956c8e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add commented out unworking code for yield-based returns --- nova/rpc.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/rpc.py b/nova/rpc.py index 04198a4a6..f43291c4b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -201,6 +201,11 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: + # TODO(termie): re-enable when fix the yielding issue + #if hasattr(rval, 'send'): + # logging.error('rval! %s', rval) + # for x in rval: + # msg_reply(msg_id, x, None) msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') -- cgit From b44c1fe9561ee8754137d2700bab295f20a4032b Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add a connection pool for rpc cast/call Use the same rabbit connection for all topic listening and wait to be notified vs doing a 0.1 second poll for each. --- nova/rpc.py | 96 ++++++++++++++++++++++++++++++++++++++++++--------------- nova/service.py | 21 +++++++------ 2 files changed, 84 insertions(+), 33 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index f43291c4b..62590ca92 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,9 +35,9 @@ from carrot import messaging import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import pools from eventlet import queue - from nova import context from nova import exception from nova import fakerabbit @@ -92,6 +92,11 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() +class Pool(pools.Pool): + def create(self): + return Connection.instance(new=True) + +ConnectionPool = Pool(max_size=20) class Consumer(messaging.Consumer): """Consumer base class. @@ -163,21 +168,9 @@ class AdapterConsumer(Consumer): self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + self.register_callback(self.process_data) - def receive(self, *args, **kwargs): - self.pool.spawn_n(self._receive, *args, **kwargs) - - @exception.wrap_exception - def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ + def process_data(self, message_data, message): LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -194,6 +187,19 @@ class AdapterConsumer(Consumer): LOG.warn(_('no method for message: %s') % message_data) msg_reply(msg_id, _('No method for message: %s') % message_data) return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception + def _process_data(self, msg_id, ctxt, method, args): + """Magically looks for a method on the proxy object and calls it. 
+ + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) @@ -214,11 +220,6 @@ class AdapterConsumer(Consumer): return -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - class TopicAdapterConsumer(AdapterConsumer): """Consumes messages on a specific topic.""" @@ -251,6 +252,50 @@ class FanoutAdapterConsumer(AdapterConsumer): topic=topic, proxy=proxy) +class ConsumerSet(object): + """Groups consumers to listen on together on a single connection""" + + def __init__(self, conn, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.init(conn) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + while True: + it = self.consumer_set.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + except Exception as e: + LOG.error(_("Received exception %s " % str(e) + \ + "while processing consumer")) + fuck + self.reconnect() + # Break to outer loop + break + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + class TopicPublisher(Publisher): """Publishes messages on a specific topic.""" @@ -315,7 +360,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -324,7 +369,9 @@ def msg_reply(msg_id, reply=None, failure=None): {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), 'failure': failure}) + publisher.close() + ConnectionPool.put(conn) class RemoteError(exception.Error): @@ -393,12 +440,11 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() consumer = DirectConsumer(connection=conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() @@ -462,10 +508,11 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def fanout_cast(context, topic, msg): @@ -511,6 +558,7 @@ def send_message(topic, message, wait=True): if wait: consumer.wait() + consumer.close() if __name__ == '__main__': diff --git a/nova/service.py b/nova/service.py index ab1238c3b..7761cfef5 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,26 +91,29 @@ class Service(object): if 'nova-compute' == self.binary: 
self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) + if self.report_interval: + conn = rpc.Connection.instance(new=True) + + # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=conn, topic=self.topic, proxy=self) consumer_node = rpc.TopicAdapterConsumer( - connection=conn2, + connection=conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn3, + connection=conn, topic=self.topic, proxy=self) - self.timers.append(consumer_all.attach_to_eventlet()) - self.timers.append(consumer_node.attach_to_eventlet()) - self.timers.append(fanout.attach_to_eventlet()) + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + greenthread.spawn_n(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) -- cgit From e1a47584cc63136280cf3ca9ef02da3efc1dff7f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: pep8 and comment fixes --- nova/rpc.py | 25 ++++++++++++++++--------- nova/service.py | 1 - 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 62590ca92..db5aec826 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -92,12 +92,16 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + class Pool(pools.Pool): + """Class that implements a Pool of Connections""" + def create(self): return Connection.instance(new=True) ConnectionPool = Pool(max_size=20) + class Consumer(messaging.Consumer): """Consumer base class. @@ -171,6 +175,16 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): + """Consumer callback that parses the message for validity and + fires off a thread to call the proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -191,14 +205,8 @@ class AdapterConsumer(Consumer): @exception.wrap_exception def _process_data(self, msg_id, ctxt, method, args): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - + """Thread that maigcally looks for a method on the proxy + object and calls it. 
""" node_func = getattr(self.proxy, str(method)) @@ -285,7 +293,6 @@ class ConsumerSet(object): except Exception as e: LOG.error(_("Received exception %s " % str(e) + \ "while processing consumer")) - fuck self.reconnect() # Break to outer loop break diff --git a/nova/service.py b/nova/service.py index 7761cfef5..c51c9b066 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,7 +91,6 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: conn = rpc.Connection.instance(new=True) -- cgit From d0be426d4e7bbfb1ecb3f078c71c1e176da441a5 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: convert fanout_cast to ConnectionPool --- nova/rpc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index db5aec826..fdb228695 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -526,10 +526,11 @@ def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = FanoutPublisher(topic, connection=conn) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def generic_response(message_data, message): -- cgit From f2c2a593c828fc86e298d3eb31672a09b498c41f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fakerabbit's declare_consumer should support more than 1 consumer. also: make fakerabbit Backend.consume be an iterator like it should be.. --- nova/fakerabbit.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a7dee8caf..a29ba9d86 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -77,6 +77,10 @@ class Queue(object): class Backend(base.BaseBackend): + def __init__(self, connection, **kwargs): + super(Backend, self).__init__(connection, **kwargs) + self.consumers = [] + def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: @@ -97,16 +101,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback + self.consumers.append((queue, callback)) def consume(self, limit=None): + num = 0 while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) + for (queue, callback) in self.consumers: + item = self.get(queue) + if item: + callback(item) + num += 1 + yield + if limit and num == limit: + raise StopIteration() + greenthread.sleep(0.1) def get(self, queue, no_ack=False): global QUEUES -- cgit From 90e30806a2e0c235612eb09792656cd861997f84 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fix consumers to actually be deleted and clean up cloud test --- nova/fakerabbit.py | 13 +++++++++---- nova/rpc.py | 13 ++++++++++--- nova/service.py | 8 +++----- nova/tests/test_cloud.py | 26 ++++++++++---------------- 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a29ba9d86..5f3e75c48 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -79,7 +79,7 @@ class Queue(object): class Backend(base.BaseBackend): def __init__(self, connection, **kwargs): super(Backend, 
self).__init__(connection, **kwargs) - self.consumers = [] + self.consumers = {} def queue_declare(self, queue, **kwargs): global QUEUES @@ -100,13 +100,18 @@ class Backend(base.BaseBackend): ' key %(routing_key)s') % locals()) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) - def declare_consumer(self, queue, callback, *args, **kwargs): - self.consumers.append((queue, callback)) + def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + LOG.debug("Adding consumer %s", consumer_tag) + self.consumers[consumer_tag] = (queue, callback) + + def cancel(self, consumer_tag): + LOG.debug("Removing consumer %s", consumer_tag) + del self.consumers[consumer_tag] def consume(self, limit=None): num = 0 while True: - for (queue, callback) in self.consumers: + for (queue, callback) in self.consumers.itervalues(): item = self.get(queue) if item: callback(item) diff --git a/nova/rpc.py b/nova/rpc.py index fdb228695..e2e962fcc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -30,11 +30,11 @@ import time import traceback import uuid +import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -266,6 +266,7 @@ class ConsumerSet(object): def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None + self.enabled = True self.init(conn) def init(self, conn): @@ -283,15 +284,21 @@ class ConsumerSet(object): self.init(None) def wait(self, limit=None): - while True: + running = True + while running: it = self.consumer_set.iterconsume(limit=limit) + if not it: + break while True: try: it.next() except StopIteration: return + except greenlet.GreenletExit: + running = False + break except Exception as e: - LOG.error(_("Received exception %s " % str(e) + \ + LOG.error(_("Received exception %s " % type(e) + \ "while processing consumer")) self.reconnect() # Break to outer loop diff --git a/nova/service.py b/nova/service.py index c51c9b066..a0ff7c9f3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,12 +21,8 @@ import inspect import os -import sys -import time -from eventlet import event from eventlet import greenthread -from eventlet import greenpool from nova import context from nova import db @@ -112,7 +108,7 @@ class Service(object): consumer_node, fanout]) # Wait forever, processing these consumers - greenthread.spawn_n(cset.wait) + self.csetthread = greenthread.spawn(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -169,6 +165,8 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" + self.csetthread.kill() + self.csetthread.wait() self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..1e14c327c 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,13 +17,8 @@ # under the License. 
from base64 import b64decode -import json from M2Crypto import BIO from M2Crypto import RSA -import os -import shutil -import tempfile -import time from eventlet import greenthread @@ -33,12 +28,10 @@ from nova import db from nova import flags from nova import log as logging from nova import rpc -from nova import service from nova import test from nova import utils from nova import exception from nova.auth import manager -from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local @@ -79,6 +72,15 @@ class CloudTestCase(test.TestCase): self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish + rpc_cast = rpc.cast + + def finish_cast(*args, **kwargs): + rpc_cast(*args, **kwargs) + greenthread.sleep(0.2) + + self.stubs.Set(rpc, 'cast', finish_cast) + def tearDown(self): network_ref = db.project_get_network(self.context, self.project.id) @@ -113,7 +115,6 @@ class CloudTestCase(test.TestCase): self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) db.floating_ip_destroy(self.context, address) def test_associate_disassociate_address(self): @@ -129,12 +130,10 @@ class CloudTestCase(test.TestCase): self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) - greenthread.sleep(0.3) self.cloud.disassociate_address(self.context, public_ip=address) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) self.network.deallocate_fixed_ip(self.context, fixed) db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) @@ -306,31 +305,26 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) - greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_ajax_console(self): + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - greenthread.sleep(0.3) output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') -- cgit From 8f2557dcd3e3d88c0eabb63bcce90ced79347ae4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: catch greenlet.GreenletExit when shutting service down --- nova/rpc.py | 2 +- nova/service.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e2e962fcc..02052ecf5 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,13 +24,13 @@ No fan-out support yet. 
""" +import greenlet import json import sys import time import traceback import uuid -import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet diff --git a/nova/service.py b/nova/service.py index a0ff7c9f3..c7e48544c 100644 --- a/nova/service.py +++ b/nova/service.py @@ -19,6 +19,7 @@ """Generic Node baseclass for all workers that run on hosts.""" +import greenlet import inspect import os @@ -166,7 +167,10 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" self.csetthread.kill() - self.csetthread.wait() + try: + self.csetthread.wait() + except greenlet.GreenletExit: + pass self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) -- cgit From 5f3adfc3110ed8095cdac43cc651aa46087c5490 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Always create Service consumers no matter if report_interval is 0 Fix tests to handle how Service loads Consumers now --- nova/service.py | 46 +++++++++++++++++++------------------ nova/tests/test_service.py | 57 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 28 deletions(-) diff --git a/nova/service.py b/nova/service.py index c7e48544c..3a364b6c6 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,29 +88,31 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: - conn = rpc.Connection.instance(new=True) - - # Share this same connection for these Consumers - consumer_all = rpc.TopicAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - consumer_node = rpc.TopicAdapterConsumer( - connection=conn, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - fanout = rpc.FanoutAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - - cset = rpc.ConsumerSet(conn, [consumer_all, - consumer_node, - fanout]) - # Wait forever, processing these consumers - self.csetthread = greenthread.spawn(cset.wait) + conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % \ + self.topic) + + # Share this same connection for these Consumers + consumer_all = rpc.TopicAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + consumer_node = rpc.TopicAdapterConsumer( + connection=conn, + topic='%s.%s' % (self.topic, self.host), + proxy=self) + fanout = rpc.FanoutAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + self.csetthread = greenthread.spawn(cset.wait) + if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) self.timers.append(pulse) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index d48de2057..0bba01d92 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -106,7 +106,10 @@ class ServiceTestCase(test.TestCase): # NOTE(vish): Create was moved out of mox replay to make sure that # the looping calls are created in StartService. 
- app = service.Service.create(host=host, binary=binary) + app = service.Service.create(host=host, binary=binary, topic=topic) + + self.mox.StubOutWithMock(service.rpc.Connection, 'instance') + service.rpc.Connection.instance(new=mox.IgnoreArg()) self.mox.StubOutWithMock(rpc, 'TopicAdapterConsumer', @@ -114,6 +117,11 @@ class ServiceTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'FanoutAdapterConsumer', use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), topic=topic, proxy=mox.IsA(service.Service)).AndReturn( @@ -129,9 +137,13 @@ class ServiceTestCase(test.TestCase): proxy=mox.IsA(service.Service)).AndReturn( rpc.FanoutAdapterConsumer) - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.FanoutAdapterConsumer.attach_to_eventlet() + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) service_create = {'host': host, 'binary': binary, @@ -287,8 +299,41 @@ class ServiceTestCase(test.TestCase): # Creating mocks self.mox.StubOutWithMock(service.rpc.Connection, 'instance') service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) + + self.mox.StubOutWithMock(rpc, + 'TopicAdapterConsumer', + use_mock_anything=True) + self.mox.StubOutWithMock(rpc, + 'FanoutAdapterConsumer', + use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic='%s.%s' % (topic, host), + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.FanoutAdapterConsumer) + + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) + self.mox.StubOutWithMock(serv.manager.driver, 'update_available_resource') serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) -- cgit From 11d3672ad655c39265e5d2477a30db3a12adc65c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add rpc_conn_pool_size flag for the new connection pool --- nova/rpc.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 02052ecf5..82869fc46 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -50,7 +50,10 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -99,7 +102,7 @@ class Pool(pools.Pool): def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=20) +ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) class Consumer(messaging.Consumer): -- cgit From 
b193b97054f11664a72cd53547f355d1c9044f88 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: connection pool tests and make the pool LIFO --- nova/rpc.py | 8 +++++++- nova/tests/test_rpc.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 82869fc46..3cc0dadd4 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -99,10 +99,16 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): """Class that implements a Pool of Connections""" + # TODO(comstud): Timeout connections not used in a while def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) +# Create a ConnectionPool to use for RPC calls. We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) class Consumer(messaging.Consumer): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index acab3e758..f64209596 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -120,6 +120,48 @@ class RpcTestCase(test.TestCase): "value": value}}) self.assertEqual(value, result) + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection""" + + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_connectionpool_double(self): + """Test that ConnectionPool returns 2 separate connections + when called consecutively and the pool returns connections LIFO + """ + + conn1 = rpc.ConnectionPool.get() + conn2 = rpc.ConnectionPool.get() + + self.assertNotEqual(conn1, conn2) + rpc.ConnectionPool.put(conn1) + rpc.ConnectionPool.put(conn2) + + conn3 = rpc.ConnectionPool.get() + conn4 = rpc.ConnectionPool.get() + self.assertEqual(conn2, conn3) + self.assertEqual(conn1, conn4) + + def test_connectionpool_limit(self): + """Test connection pool limit and verify all connections + are unique + """ + + max_size = FLAGS.rpc_conn_pool_size + conns = [] + + for i in xrange(max_size): + conns.append(rpc.ConnectionPool.get()) + + self.assertFalse(rpc.ConnectionPool.free_items) + self.assertEqual(rpc.ConnectionPool.current_size, + rpc.ConnectionPool.max_size) + self.assertEqual(len(set(conns)), max_size) + class TestReceiver(object): """Simple Proxy class so the consumer has methods to call -- cgit From 51e8eeb9b3a23f811bcbf52d9700d94c5c8b15e4 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: bring back commits lost in merge --- nova/rpc.py | 107 +++++++++++++++++++++++++++++-------------------- nova/tests/test_rpc.py | 19 +++++++++ 2 files changed, 82 insertions(+), 44 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 3cc0dadd4..d7d7bb014 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,6 +35,7 @@ from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool +from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -140,30 +141,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connection.""" - # TODO(vish): the logic for failed connections and 
logging should be - # refactored into some sort of connection manager object - try: - if self.failed_connection: - # NOTE(vish): connection is defined in the parent class, we can - # recreate it as long as we create the backend too - # pylint: disable=W0201 - self.connection = Connection.recreate() - self.backend = self.connection.create_backend() - self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) - if self.failed_connection: - LOG.error(_('Reconnected to queue')) - self.failed_connection = False - # NOTE(vish): This is catching all errors because we really don't - # want exceptions to be logged 10 times a second if some - # persistent failure occurs. - except Exception, e: # pylint: disable=W0703 - if not self.failed_connection: - LOG.exception(_('Failed to fetch message from queue: %s' % e)) - self.failed_connection = True + #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + # """Wraps the parent fetch with some logic for failed connection.""" + # # TODO(vish): the logic for failed connections and logging should be + # # refactored into some sort of connection manager object + # try: + # if self.failed_connection: + # # NOTE(vish): connection is defined in the parent class, we can + # # recreate it as long as we create the backend too + # # pylint: disable=W0201 + # self.connection = Connection.recreate() + # self.backend = self.connection.create_backend() + # self.declare() + # return super(Consumer, self).fetch( + # no_ack, auto_ack, enable_callbacks) + # if self.failed_connection: + # LOG.error(_('Reconnected to queue')) + # self.failed_connection = False + # # NOTE(vish): This is catching all errors because we really don't + # # want exceptions to be logged 10 times a second if some + # # persistent failure occurs. + # except Exception, e: # pylint: disable=W0703 + # if not self.failed_connection: + # LOG.exception(_('Failed to fetch message from queue: %s' % e)) + # self.failed_connection = True def attach_to_eventlet(self): """Only needed for unit tests!""" @@ -195,7 +196,7 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) - msg_id = message_data.pop('_msg_id', None) + msg_id = message_data.get('_msg_id', None) ctxt = _unpack_context(message_data) @@ -225,11 +226,14 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # TODO(termie): re-enable when fix the yielding issue - #if hasattr(rval, 'send'): - # logging.error('rval! %s', rval) - # for x in rval: - # msg_reply(msg_id, x, None) - msg_reply(msg_id, rval, None) + if hasattr(rval, 'send'): + logging.error('rval! 
%s', rval) + for x in rval: + msg_reply(msg_id, x, None) + msg_reply(msg_id, None, None) + else: + msg_reply(msg_id, rval, None) + #msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -355,7 +359,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = True + self.exclusive = False super(DirectConsumer, self).__init__(connection=connection) @@ -387,7 +391,9 @@ def msg_reply(msg_id, reply=None, failure=None): publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) + LOG.error('MSG REPLY SUCCESS') except TypeError: + LOG.error('MSG REPLY FAILURE') publisher.send( {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), @@ -440,9 +446,9 @@ def _pack_context(msg, context): for args at some point. """ - context = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - msg.update(context) + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) class RpcContext(context.RequestContext): @@ -463,12 +469,13 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = ConnectionPool.get() - consumer = DirectConsumer(connection=conn, msg_id=msg_id) + con_conn = ConnectionPool.get() + consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - publisher = TopicPublisher(connection=conn, topic=topic) + pub_conn = ConnectionPool.get() + publisher = TopicPublisher(connection=pub_conn, topic=topic) publisher.send(msg) publisher.close() @@ -484,6 +491,7 @@ class MulticallWaiter(object): def close(self): self._closed = True self._consumer.close() + ConnectionPool.put(self._consumer.connection) def __call__(self, data, message): """Acks message and sets result.""" @@ -501,15 +509,26 @@ class MulticallWaiter(object): # trying to solve the problem quickly. This works but # I'd prefer to dig in and do it the best way later on. 
- def _waiter(): - while not self._closed: - try: - self._consumer.wait(limit=1) - except StopIteration: - pass - eventlet.spawn(_waiter) + #def _waiter(): + # i = 0 + # while not self._closed: + # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) + # i += 1 + # try: + # self._consumer.wait(limit=1) + # except StopIteration: + # pass + # self._consumer.close() + # ConnectionPool.put(self._consumer.connection) + #eventlet.spawn(_waiter) while True: + rv = None + while rv is None and not self._closed: + rv = self._consumer.fetch(enable_callbacks=True) + time.sleep(0.01) + + LOG.error('RV %s', rv) result = self._results.get() if isinstance(result, Exception): raise result diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index f64209596..e5d99474d 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -61,6 +61,18 @@ class RpcTestCase(test.TestCase): self.assertEqual(value + i, x) i += 1 + def test_multicall_succeed_three_times_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + i = 0 + for x in result: + self.assertEqual(value + i, x) + i += 1 + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -83,6 +95,7 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) + LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', @@ -186,6 +199,12 @@ class TestReceiver(object): context.reply(value + 1) context.reply(value + 2) + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" -- cgit From 64b13a2aad676d2310947e3bf8b9e3dde6b763e7 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: almost everything working with fake_rabbit --- nova/rpc.py | 16 +++++++++++++++- nova/service.py | 22 ++++++++++++++++------ nova/test.py | 1 + nova/tests/test_cloud.py | 4 ++-- run_tests.py | 1 + 5 files changed, 35 insertions(+), 9 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index d7d7bb014..e1f594a99 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -102,6 +102,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): + LOG.debug('Creating new connection') return Connection.instance(new=True) # Create a ConnectionPool to use for RPC calls. 
We'll order the @@ -166,6 +167,10 @@ class Consumer(messaging.Consumer): # LOG.exception(_('Failed to fetch message from queue: %s' % e)) # self.failed_connection = True + def close(self, *args, **kwargs): + LOG.debug('Closing consumer %s', self.consumer_tag) + return super(Consumer, self).close(*args, **kwargs) + def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -317,6 +322,8 @@ class ConsumerSet(object): # Break to outer loop break + def close(self): + self.consumer_set.close() class Publisher(messaging.Publisher): """Publisher base class.""" @@ -525,12 +532,19 @@ class MulticallWaiter(object): while True: rv = None while rv is None and not self._closed: - rv = self._consumer.fetch(enable_callbacks=True) + try: + rv = self._consumer.fetch(enable_callbacks=True) + except Exception: + self.close() + raise + #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) LOG.error('RV %s', rv) result = self._results.get() + LOG.error('RESULT %s', result) if isinstance(result, Exception): + self.close() raise result if result == None: self.close() diff --git a/nova/service.py b/nova/service.py index 3a364b6c6..2da510140 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,29 +88,39 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn = rpc.Connection.instance(new=True) + conn1 = rpc.Connection.instance(new=True) + conn2 = rpc.Connection.instance(new=True) + conn3 = rpc.Connection.instance(new=True) logging.debug("Creating Consumer connection for Service %s" % \ self.topic) # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn, + connection=conn1, topic=self.topic, proxy=self) consumer_node = rpc.TopicAdapterConsumer( - connection=conn, + connection=conn1, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn, + connection=conn1, topic=self.topic, proxy=self) - cset = rpc.ConsumerSet(conn, [consumer_all, + cset = rpc.ConsumerSet(conn1, [consumer_all, consumer_node, fanout]) # Wait forever, processing these consumers - self.csetthread = greenthread.spawn(cset.wait) + def _wait(): + cset.wait() + cset.close() + + self.csetthread = greenthread.spawn(_wait) + + #self.timers.append(consumer_all.attach_to_eventlet()) + #self.timers.append(consumer_node.attach_to_eventlet()) + #self.timers.append(fanout.attach_to_eventlet()) if self.report_interval: pulse = utils.LoopingCall(self.report_state) diff --git a/nova/test.py b/nova/test.py index 4deb2a175..7b2cf94b6 100644 --- a/nova/test.py +++ b/nova/test.py @@ -85,6 +85,7 @@ class TestCase(unittest.TestCase): self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() + rpc.ConnectionPool = rpc.Pool(max_size=30) def tearDown(self): """Runs after each test method to tear down test environment.""" diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1e14c327c..a838dd530 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,8 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - self.compute.kill() - self.network.kill() + #self.compute.kill() + #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): diff --git a/run_tests.py b/run_tests.py index d5d8acd16..509a60caa 100644 
--- a/run_tests.py +++ b/run_tests.py @@ -285,6 +285,7 @@ if __name__ == '__main__': # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much argv = [] + logging.getLogger('amqplib').setLevel(logging.DEBUG) for x in sys.argv: if x.startswith('test_'): argv.append('nova.tests.%s' % x) -- cgit From e3a88390fd62308cde3d4c597d653c8dc245bed4 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: don't need to use a separate connection --- nova/rpc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e1f594a99..a212383fd 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -481,8 +481,7 @@ def multicall(context, topic, msg): wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - pub_conn = ConnectionPool.get() - publisher = TopicPublisher(connection=pub_conn, topic=topic) + publisher = TopicPublisher(connection=con_conn, topic=topic) publisher.send(msg) publisher.close() -- cgit From c9b21b0619891c069251c568e4d89be791af56c3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: lots of fixes for rpc and extra imports --- nova/fakerabbit.py | 12 +++-- nova/rpc.py | 68 +++++++++++------------------ nova/service.py | 9 ++-- nova/test.py | 8 ++-- nova/tests/integrated/integrated_helpers.py | 5 +-- 5 files changed, 45 insertions(+), 57 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 5f3e75c48..ff993e29a 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit") EXCHANGES = {} QUEUES = {} +CONSUMERS = {} class Message(base.BaseMessage): @@ -101,17 +102,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + global CONSUMERS LOG.debug("Adding consumer %s", consumer_tag) - self.consumers[consumer_tag] = (queue, callback) + CONSUMERS[consumer_tag] = (queue, callback) def cancel(self, consumer_tag): + global CONSUMERS LOG.debug("Removing consumer %s", consumer_tag) - del self.consumers[consumer_tag] + del CONSUMERS[consumer_tag] def consume(self, limit=None): + global CONSUMERS num = 0 while True: - for (queue, callback) in self.consumers.itervalues(): + for (queue, callback) in CONSUMERS.itervalues(): item = self.get(queue) if item: callback(item) @@ -147,5 +151,7 @@ class Backend(base.BaseBackend): def reset_all(): global EXCHANGES global QUEUES + global CONSUMERS EXCHANGES = {} QUEUES = {} + CONSUMERS = {} diff --git a/nova/rpc.py b/nova/rpc.py index a212383fd..7faed4d3a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -33,9 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging -import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -142,30 +140,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - # """Wraps the parent fetch with some logic for failed connection.""" - # # TODO(vish): the logic for failed connections and logging should be - # # refactored into some sort of connection manager object - # try: - # if self.failed_connection: - # # NOTE(vish): connection is defined in the parent class, we can - # # recreate it as long as we create the backend too - # 
# pylint: disable=W0201 - # self.connection = Connection.recreate() - # self.backend = self.connection.create_backend() - # self.declare() - # return super(Consumer, self).fetch( - # no_ack, auto_ack, enable_callbacks) - # if self.failed_connection: - # LOG.error(_('Reconnected to queue')) - # self.failed_connection = False - # # NOTE(vish): This is catching all errors because we really don't - # # want exceptions to be logged 10 times a second if some - # # persistent failure occurs. - # except Exception, e: # pylint: disable=W0703 - # if not self.failed_connection: - # LOG.exception(_('Failed to fetch message from queue: %s' % e)) - # self.failed_connection = True + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch with some logic for failed connection.""" + # TODO(vish): the logic for failed connections and logging should be + # refactored into some sort of connection manager object + try: + if self.failed_connection: + # NOTE(vish): connection is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 + self.connection = Connection.recreate() + self.backend = self.connection.create_backend() + self.declare() + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) + if self.failed_connection: + LOG.error(_('Reconnected to queue')) + self.failed_connection = False + # NOTE(vish): This is catching all errors because we really don't + # want exceptions to be logged 10 times a second if some + # persistent failure occurs. + except Exception, e: # pylint: disable=W0703 + if not self.failed_connection: + LOG.exception(_('Failed to fetch message from queue: %s' % e)) + self.failed_connection = True def close(self, *args, **kwargs): LOG.debug('Closing consumer %s', self.consumer_tag) @@ -325,6 +323,7 @@ class ConsumerSet(object): def close(self): self.consumer_set.close() + class Publisher(messaging.Publisher): """Publisher base class.""" pass @@ -511,23 +510,6 @@ class MulticallWaiter(object): return self.wait() def wait(self): - # TODO(termie): This is probably really a much simpler issue but am - # trying to solve the problem quickly. This works but - # I'd prefer to dig in and do it the best way later on. 
- - #def _waiter(): - # i = 0 - # while not self._closed: - # LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag) - # i += 1 - # try: - # self._consumer.wait(limit=1) - # except StopIteration: - # pass - # self._consumer.close() - # ConnectionPool.put(self._consumer.connection) - #eventlet.spawn(_waiter) - while True: rv = None while rv is None and not self._closed: diff --git a/nova/service.py b/nova/service.py index 2da510140..2626c49ae 100644 --- a/nova/service.py +++ b/nova/service.py @@ -89,8 +89,6 @@ class Service(object): self.manager.update_available_resource(ctxt) conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) logging.debug("Creating Consumer connection for Service %s" % \ self.topic) @@ -111,10 +109,13 @@ class Service(object): cset = rpc.ConsumerSet(conn1, [consumer_all, consumer_node, fanout]) + # Wait forever, processing these consumers def _wait(): - cset.wait() - cset.close() + try: + cset.wait() + finally: + cset.close() self.csetthread = greenthread.spawn(_wait) diff --git a/nova/test.py b/nova/test.py index 7b2cf94b6..df48afbb1 100644 --- a/nova/test.py +++ b/nova/test.py @@ -31,17 +31,15 @@ import uuid import unittest import mox -import shutil import stubout from eventlet import greenthread -from nova import context -from nova import db from nova import fakerabbit from nova import flags from nova import rpc from nova import service from nova import wsgi +from nova.virt import fake FLAGS = flags.FLAGS @@ -100,6 +98,10 @@ class TestCase(unittest.TestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() + if FLAGS.connection_type == 'fake': + if hasattr(fake.FakeConnection, '_instance'): + del fake.FakeConnection._instance + # Reset any overriden flags self.reset_flags() diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index bc98921f0..7f590441e 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -154,10 +154,7 @@ class _IntegratedTestBase(test.TestCase): # set up services self.start_service('compute') self.start_service('volume') - # NOTE(justinsb): There's a bug here which is eluding me... - # If we start the network_service, all is good, but then subsequent - # tests fail: CloudTestCase.test_ajax_console in particular. - #self.start_service('network') + self.start_service('network') self.start_service('scheduler') self._start_api_service() -- cgit From 9334d41c6fe638a3119327702094695cfbd38271 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:25 -0700 Subject: make sure that using multicall on a call with a single result still functions --- nova/rpc.py | 4 ++-- nova/tests/test_rpc.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 7faed4d3a..84493271f 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -233,10 +233,10 @@ class AdapterConsumer(Consumer): logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) - msg_reply(msg_id, None, None) else: msg_reply(msg_id, rval, None) - #msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
+ msg_reply(msg_id, None, None) except Exception as e: logging.exception('Exception during message handling') if msg_id: diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index e5d99474d..c1ef60ff6 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,35 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_call_succeed_despite_multiple_returns(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + """Get a value through rpc call""" + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_multicall_succeed_once(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + i = 0 + for x in result: + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + i += 1 + def test_multicall_succeed_three_times(self): """Get a value through rpc call""" value = 42 -- cgit From c7fe7e5e28b9f4bb999c8309f56953f6609cbc57 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:49 -0700 Subject: cleanup the code for merging --- nova/fakerabbit.py | 4 --- nova/rpc.py | 78 ++++++++++++++++++++++-------------------------- nova/service.py | 23 +++++++------- nova/test.py | 2 +- nova/tests/test_cloud.py | 3 -- nova/tests/test_rpc.py | 1 - run_tests.py | 1 - 7 files changed, 46 insertions(+), 66 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index ff993e29a..e7e9dab77 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -78,10 +78,6 @@ class Queue(object): class Backend(base.BaseBackend): - def __init__(self, connection, **kwargs): - super(Backend, self).__init__(connection, **kwargs) - self.consumers = {} - def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: diff --git a/nova/rpc.py b/nova/rpc.py index 84493271f..8d14494f0 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,7 +24,6 @@ No fan-out support yet. 
""" -import greenlet import json import sys import time @@ -36,6 +35,7 @@ from carrot import messaging from eventlet import greenpool from eventlet import pools from eventlet import queue +import greenlet from nova import context from nova import exception @@ -50,9 +50,9 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') + 'Size of RPC thread pool') flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -96,7 +96,7 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): - """Class that implements a Pool of Connections""" + """Class that implements a Pool of Connections.""" # TODO(comstud): Timeout connections not used in a while def create(self): @@ -152,8 +152,9 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -165,10 +166,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def close(self, *args, **kwargs): - LOG.debug('Closing consumer %s', self.consumer_tag) - return super(Consumer, self).close(*args, **kwargs) - def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -188,8 +185,10 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): - """Consumer callback that parses the message for validity and - fires off a thread to call the proxy object method. + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call @@ -199,8 +198,8 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) + # This will be popped off in _unpack_context msg_id = message_data.get('_msg_id', None) - ctxt = _unpack_context(message_data) method = message_data.get('method') @@ -228,13 +227,13 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: - # TODO(termie): re-enable when fix the yielding issue + # Check if the result was a generator if hasattr(rval, 'send'): - logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) else: msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
msg_reply(msg_id, None, None) except Exception as e: @@ -277,7 +276,7 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): - """Groups consumers to listen on together on a single connection""" + """Groups consumers to listen on together on a single connection.""" def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) @@ -365,7 +364,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = False + self.exclusive = True super(DirectConsumer, self).__init__(connection=connection) @@ -393,20 +392,18 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = ConnectionPool.get() - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - LOG.error('MSG REPLY SUCCESS') - except TypeError: - LOG.error('MSG REPLY FAILURE') - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() class RemoteError(exception.Error): @@ -518,12 +515,9 @@ class MulticallWaiter(object): except Exception: self.close() raise - #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) - LOG.error('RV %s', rv) result = self._results.get() - LOG.error('RESULT %s', result) if isinstance(result, Exception): self.close() raise result @@ -545,22 +539,20 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() def generic_response(message_data, message): diff --git a/nova/service.py b/nova/service.py index 2626c49ae..94afd5f78 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,27 +88,27 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - logging.debug("Creating Consumer connection for Service %s" % \ - self.topic) + self.conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % + self.topic) # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) 
consumer_node = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) - cset = rpc.ConsumerSet(conn1, [consumer_all, - consumer_node, - fanout]) + cset = rpc.ConsumerSet(self.conn, [consumer_all, + consumer_node, + fanout]) # Wait forever, processing these consumers def _wait(): @@ -119,10 +119,6 @@ class Service(object): self.csetthread = greenthread.spawn(_wait) - #self.timers.append(consumer_all.attach_to_eventlet()) - #self.timers.append(consumer_node.attach_to_eventlet()) - #self.timers.append(fanout.attach_to_eventlet()) - if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -185,6 +181,7 @@ class Service(object): except greenlet.GreenletExit: pass self.stop() + rpc.ConnectionPool.put(self.conn) try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: diff --git a/nova/test.py b/nova/test.py index df48afbb1..80b2d0a74 100644 --- a/nova/test.py +++ b/nova/test.py @@ -83,7 +83,7 @@ class TestCase(unittest.TestCase): self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() - rpc.ConnectionPool = rpc.Pool(max_size=30) + rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size) def tearDown(self): """Runs after each test method to tear down test environment.""" diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a838dd530..ca3ef7ffe 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,6 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - #self.compute.kill() - #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): @@ -314,7 +312,6 @@ class CloudTestCase(test.TestCase): rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index c1ef60ff6..fcecfb352 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -124,7 +124,6 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) - LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', diff --git a/run_tests.py b/run_tests.py index 509a60caa..d5d8acd16 100644 --- a/run_tests.py +++ b/run_tests.py @@ -285,7 +285,6 @@ if __name__ == '__main__': # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much argv = [] - logging.getLogger('amqplib').setLevel(logging.DEBUG) for x in sys.argv: if x.startswith('test_'): argv.append('nova.tests.%s' % x) -- cgit From 7755bbfc7b16248dab23bfab479d09501519290f Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: cleanups --- nova/tests/test_rpc.py | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index fcecfb352..8523b409c 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -31,7 +31,6 @@ LOG = 
logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): - """Test cases for rpc""" def setUp(self): super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance(True) @@ -43,21 +42,18 @@ class RpcTestCase(test.TestCase): self.context = context.get_admin_context() def test_call_succeed(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) self.assertEqual(value, result) def test_call_succeed_despite_multiple_returns_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", @@ -65,7 +61,6 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_multicall_succeed_once(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -79,7 +74,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -91,7 +85,6 @@ class RpcTestCase(test.TestCase): i += 1 def test_multicall_succeed_three_times_yield(self): - """Get a value through rpc call""" value = 42 result = rpc.multicall(self.context, 'test', @@ -103,7 +96,7 @@ class RpcTestCase(test.TestCase): i += 1 def test_context_passed(self): - """Makes sure a context is passed through rpc call""" + """Makes sure a context is passed through rpc call.""" value = 42 result = rpc.call(self.context, 'test', {"method": "context", @@ -111,11 +104,12 @@ class RpcTestCase(test.TestCase): self.assertEqual(self.context.to_dict(), result) def test_call_exception(self): - """Test that exception gets passed back properly + """Test that exception gets passed back properly. rpc.call returns a RemoteError object. The value of the exception is converted to a string, so we convert it back to an int in the test. + """ value = 42 self.assertRaises(rpc.RemoteError, @@ -134,7 +128,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(int(exc.value), value) def test_nested_calls(self): - """Test that we can do an rpc.call inside another call""" + """Test that we can do an rpc.call inside another call.""" class Nested(object): @staticmethod def echo(context, queue, value): @@ -162,8 +156,7 @@ class RpcTestCase(test.TestCase): self.assertEqual(value, result) def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection""" - + """Test that ConnectionPool recycles a single connection.""" conn1 = rpc.ConnectionPool.get() rpc.ConnectionPool.put(conn1) conn2 = rpc.ConnectionPool.get() @@ -171,10 +164,13 @@ class RpcTestCase(test.TestCase): self.assertEqual(conn1, conn2) def test_connectionpool_double(self): - """Test that ConnectionPool returns 2 separate connections - when called consecutively and the pool returns connections LIFO - """ + """Test that ConnectionPool returns and reuses separate connections. + + When called consecutively we should get separate connections and upon + returning them those connections should be reused for future calls + before generating a new connection. 
+ """ conn1 = rpc.ConnectionPool.get() conn2 = rpc.ConnectionPool.get() @@ -184,14 +180,11 @@ class RpcTestCase(test.TestCase): conn3 = rpc.ConnectionPool.get() conn4 = rpc.ConnectionPool.get() - self.assertEqual(conn2, conn3) - self.assertEqual(conn1, conn4) + self.assertEqual(conn1, conn3) + self.assertEqual(conn2, conn4) def test_connectionpool_limit(self): - """Test connection pool limit and verify all connections - are unique - """ - + """Test connection pool limit and connection uniqueness.""" max_size = FLAGS.rpc_conn_pool_size conns = [] @@ -205,19 +198,21 @@ class RpcTestCase(test.TestCase): class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. - Uses static methods because we aren't actually storing any state""" + """ @staticmethod def echo(context, value): - """Simply returns whatever value is sent in""" + """Simply returns whatever value is sent in.""" LOG.debug(_("Received %s"), value) return value @staticmethod def context(context, value): - """Returns dictionary version of context""" + """Returns dictionary version of context.""" LOG.debug(_("Received %s"), context) return context.to_dict() @@ -235,5 +230,5 @@ class TestReceiver(object): @staticmethod def fail(context, value): - """Raises an exception with the value sent in""" + """Raises an exception with the value sent in.""" raise Exception(value) -- cgit From f56df190ee888ae731740e7e949fb6f0c012d687 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: replace removed import --- nova/tests/test_cloud.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ca3ef7ffe..b64be662e 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -19,6 +19,7 @@ from base64 import b64decode from M2Crypto import BIO from M2Crypto import RSA +import os from eventlet import greenthread -- cgit From b3506a471bbce063d72aead211f45d693bda7853 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: don't put connection back in pool --- nova/service.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/service.py b/nova/service.py index 94afd5f78..141fd4253 100644 --- a/nova/service.py +++ b/nova/service.py @@ -181,7 +181,6 @@ class Service(object): except greenlet.GreenletExit: pass self.stop() - rpc.ConnectionPool.put(self.conn) try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: -- cgit From a05e8e7587e42633e8459fd050eee3a4da247330 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: move consumerset killing into stop --- nova/service.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/service.py b/nova/service.py index 141fd4253..782183322 100644 --- a/nova/service.py +++ b/nova/service.py @@ -175,11 +175,6 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" - self.csetthread.kill() - try: - self.csetthread.wait() - except greenlet.GreenletExit: - pass self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) @@ -187,6 +182,11 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): + self.csetthread.kill() + try: + self.csetthread.wait() + except greenlet.GreenletExit: + pass for x in self.timers: try: x.stop() -- cgit From 
feb04f0117450bcd6e8f4966f4487575073be41c Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: change the behavior of calling a multicall --- nova/rpc.py | 8 +++++--- nova/tests/test_rpc.py | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 8d14494f0..493978e57 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -236,6 +236,9 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. msg_reply(msg_id, None, None) + elif hasattr(rval, 'send'): + # NOTE(vish): this iterates through the generator + list(rval) except Exception as e: logging.exception('Exception during message handling') if msg_id: @@ -530,9 +533,8 @@ class MulticallWaiter(object): def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) - for x in rv: - rv.close() - return x + # NOTE(vish): return the last result from the multicall + return list(rv)[-1] def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 8523b409c..35f4a64d9 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -51,14 +51,14 @@ class RpcTestCase(test.TestCase): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_call_succeed_despite_multiple_returns_yield(self): value = 42 result = rpc.call(self.context, 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - self.assertEqual(value, result) + self.assertEqual(value + 2, result) def test_multicall_succeed_once(self): value = 42 -- cgit From d7e0b45a9bc415e87beee32f10c8d6bdff9819ed Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 26 May 2011 15:08:53 -0700 Subject: changes per review --- nova/rpc.py | 17 ++++++++++------- nova/service.py | 17 ++++++++--------- nova/tests/test_rpc.py | 12 +++--------- nova/tests/test_service.py | 6 ++++-- 4 files changed, 25 insertions(+), 27 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 493978e57..1ec495bc8 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -28,6 +28,7 @@ import json import sys import time import traceback +import types import uuid from carrot import connection as carrot_connection @@ -228,7 +229,7 @@ class AdapterConsumer(Consumer): rval = node_func(context=ctxt, **node_args) if msg_id: # Check if the result was a generator - if hasattr(rval, 'send'): + if isinstance(rval, types.GeneratorType): for x in rval: msg_reply(msg_id, x, None) else: @@ -236,7 +237,7 @@ class AdapterConsumer(Consumer): # This final None tells multicall that it is done. 
msg_reply(msg_id, None, None) - elif hasattr(rval, 'send'): + elif isinstance(rval, types.GeneratorType): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: @@ -281,11 +282,11 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): """Groups consumers to listen on together on a single connection.""" - def __init__(self, conn, consumer_list): + def __init__(self, connection, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None self.enabled = True - self.init(conn) + self.init(connection) def init(self, conn): if not conn: @@ -316,8 +317,7 @@ class ConsumerSet(object): running = False break except Exception as e: - LOG.error(_("Received exception %s " % type(e) + \ - "while processing consumer")) + LOG.exception(_("Exception while processing consumer")) self.reconnect() # Break to outer loop break @@ -534,7 +534,10 @@ def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) # NOTE(vish): return the last result from the multicall - return list(rv)[-1] + rv = list(rv) + if not rv: + return + return rv[-1] def cast(context, topic, msg): diff --git a/nova/service.py b/nova/service.py index 782183322..74f9f04d8 100644 --- a/nova/service.py +++ b/nova/service.py @@ -105,19 +105,18 @@ class Service(object): connection=self.conn, topic=self.topic, proxy=self) - - cset = rpc.ConsumerSet(self.conn, [consumer_all, - consumer_node, - fanout]) + consumer_set = rpc.ConsumerSet( + connection=self.conn, + consumer_list=[consumer_all, consumer_node, fanout]) # Wait forever, processing these consumers def _wait(): try: - cset.wait() + consumer_set.wait() finally: - cset.close() + consumer_set.close() - self.csetthread = greenthread.spawn(_wait) + self.consumer_set_thread = greenthread.spawn(_wait) if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -182,9 +181,9 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): - self.csetthread.kill() + self.consumer_set_thread.kill() try: - self.csetthread.wait() + self.consumer_set_thread.wait() except greenlet.GreenletExit: pass for x in self.timers: diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 35f4a64d9..ffd748efe 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -66,12 +66,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): if i > 0: self.fail('should only receive one response') self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times(self): value = 42 @@ -79,10 +77,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_multicall_succeed_three_times_yield(self): value = 42 @@ -90,10 +86,8 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times_yield", "args": {"value": value}}) - i = 0 - for x in result: + for i, x in enumerate(result): self.assertEqual(value + i, x) - i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call.""" diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 0bba01d92..d1cc8bd61 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -142,7 +142,8 @@ class ServiceTestCase(test.TestCase): mock_cset = 
self.mox.CreateMock(rpc.ConsumerSet, {'wait': wait_func}) - rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + rpc.ConsumerSet(connection=mox.IgnoreArg(), + consumer_list=mox.IsA(list)).AndReturn(mock_cset) wait_func(mox.IgnoreArg()) service_create = {'host': host, @@ -331,7 +332,8 @@ class ServiceTestCase(test.TestCase): mock_cset = self.mox.CreateMock(rpc.ConsumerSet, {'wait': wait_func}) - rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + rpc.ConsumerSet(connection=mox.IgnoreArg(), + consumer_list=mox.IsA(list)).AndReturn(mock_cset) wait_func(mox.IgnoreArg()) self.mox.StubOutWithMock(serv.manager.driver, -- cgit From 103bcae9f172dfee64e7b9235807bcfe1a8aefb3 Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 26 May 2011 17:06:52 -0700 Subject: fix a minor bug unrelated to this change --- nova/rpc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 1ec495bc8..c5277c6a9 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -212,7 +212,9 @@ class AdapterConsumer(Consumer): # we just log the message and send an error string # back to the caller LOG.warn(_('no method for message: %s') % message_data) - msg_reply(msg_id, _('No method for message: %s') % message_data) + if msg_id: + msg_reply(msg_id, + _('No method for message: %s') % message_data) return self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) -- cgit From 6b0ed0cb61838d01b15df26fc32df0de90f1cfbe Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 13:20:45 +0900 Subject: Fix a description of 'snapshot_name_template'. --- nova/db/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/api.py b/nova/db/api.py index 3597732b9..e85ce9f16 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -48,7 +48,7 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x', flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', - 'Template string to be used to generate instance names') + 'Template string to be used to generate snapshot names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], -- cgit From 8b4c91b9f2c28e4809659f199affddbd66482dbb Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Fri, 27 May 2011 13:36:59 +0900 Subject: Fix pep8 violations. 
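No functional change in this commit: the hunks below re-wrap calls that ran past pep8's line-length limit with a hanging indent, strip trailing whitespace from blank lines in driver.py, pull a dangling closing parenthesis in the migration up onto the preceding line, and add the second blank line expected between top-level definitions in models.py. The wrapping style, taken from the cloud.py hunk that follows:

    # before (one over-long line)
    snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id)

    # after (arguments wrapped with a hanging indent)
    snapshot = self.volume_api.get_snapshot(
        context,
        snapshot_id=internal_id)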
--- nova/api/ec2/cloud.py | 13 +++++++++---- .../versions/019_add_volume_snapshot_support.py | 3 +-- nova/db/sqlalchemy/models.py | 1 + nova/tests/test_volume.py | 5 +++-- nova/volume/driver.py | 6 +++--- nova/volume/manager.py | 3 ++- 6 files changed, 19 insertions(+), 12 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 6927d6774..403b7ab40 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -285,7 +285,9 @@ class CloudController(object): snapshots = [] for ec2_id in snapshot_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) - snapshot = self.volume_api.get_snapshot(context, snapshot_id=internal_id) + snapshot = self.volume_api.get_snapshot( + context, + snapshot_id=internal_id) snapshots.append(snapshot) else: snapshots = self.volume_api.get_all_snapshots(context) @@ -295,7 +297,8 @@ class CloudController(object): def _format_snapshot(self, context, snapshot): s = {} s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x') - s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], 'vol-%08x') + s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'], + 'vol-%08x') s['status'] = snapshot['status'] s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] @@ -308,7 +311,8 @@ class CloudController(object): return s def create_snapshot(self, context, volume_id, **kwargs): - LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) + LOG.audit(_("Create snapshot of volume %s"), volume_id, + context=context) volume_id = ec2utils.ec2_id_to_id(volume_id) snapshot = self.volume_api.create_snapshot( context, @@ -629,7 +633,8 @@ class CloudController(object): else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') != None: - v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], 'snap-%08x') + v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'], + 'snap-%08x') else: v['snapshotId'] = None diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py index 5a44bac16..f16d6db56 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -48,8 +48,7 @@ snapshots = Table('snapshots', meta, unicode_error=None, _warn_on_bytestring=False)), Column('display_description', String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)) - ) + unicode_error=None, _warn_on_bytestring=False))) def upgrade(migrate_engine): diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index b887c5bad..480f62399 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -353,6 +353,7 @@ class Snapshot(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + class ExportDevice(BASE, NovaBase): """Represates a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index c66b66959..3472b1f59 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -193,8 +193,9 @@ class VolumeTestCase(test.TestCase): self.volume.create_volume(self.context, volume_id) snapshot_id = self._create_snapshot(volume_id) self.volume.create_snapshot(self.context, volume_id, snapshot_id) - self.assertEqual(snapshot_id, db.snapshot_get(context.get_admin_context(), - snapshot_id).id) + 
self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) self.volume.delete_snapshot(self.context, snapshot_id) self.assertRaises(exception.NotFound, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e0e18b9bf..21cc228c9 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -97,7 +97,7 @@ class VolumeDriver(object): def _copy_volume(self, srcstr, deststr, size_in_g): self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % (size_in_g * 1024), 'bs=1M') - + def _volume_not_present(self, volume_name): path_name = '%s/%s' % (FLAGS.volume_group, volume_name) try: @@ -115,7 +115,7 @@ class VolumeDriver(object): self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % (FLAGS.volume_group, self._escape_snapshot(volume['name']))) - + def _sizestr(self, size_in_g): if int(size_in_g) == 0: return '100M' @@ -150,7 +150,7 @@ class VolumeDriver(object): out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): raise exception.VolumeIsBusy(volume_name=volume['name']) - + self._delete_volume(volume, volume['size']) def create_snapshot(self, snapshot): diff --git a/nova/volume/manager.py b/nova/volume/manager.py index fd889633d..40a104d35 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -169,7 +169,8 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) model_update = self.driver.create_snapshot(snapshot_ref) if model_update: - self.db.snapshot_update(context, snapshot_ref['id'], model_update) + self.db.snapshot_update(context, snapshot_ref['id'], + model_update) except Exception: self.db.snapshot_update(context, -- cgit From a92f2bcbbaa40458e81bad3f6cb21288161322f9 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 06:56:50 +0000 Subject: fix calls to openssl properly now. Only append \n to stdin when decoding. Updated the test slightly, also. --- nova/tests/test_xenapi.py | 1 + nova/virt/xenapi/vmops.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 18a267896..3ba37a762 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -595,6 +595,7 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def test_encryption(self): msg = "This is a top-secret message" enc = self.alice.encrypt(msg) + self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, msg) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6d516ddbc..1d8678ce2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1195,12 +1195,16 @@ class SimpleDH(object): '-nosalt %(dec_flag)s') if which.lower()[0] == 'd': dec_flag = ' -d' + # When decoding base64, we need to make sure there's a + # single '\n' at the end of the base64 encoded data. 
+ # It's kinda dumb that openssl wants to see a newline + text = text.strip('\n') + '\n' else: dec_flag = '' shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) - proc.stdin.write(text + '\n') + proc.stdin.write(text) proc.stdin.close() proc.wait() err = proc.stderr.read() -- cgit From 107b15d2dd7d554d9cca177343ab45c51029d484 Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:15:33 -0400 Subject: fix encryption handling of newlines again and restructure the code a bit --- nova/tests/test_xenapi.py | 22 ++++++++++++++++++---- nova/virt/xenapi/vmops.py | 24 +++++++----------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 3ba37a762..0632d05a5 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -592,12 +592,26 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): bob_shared = self.bob.compute_shared(alice_pub) self.assertEquals(alice_shared, bob_shared) - def test_encryption(self): - msg = "This is a top-secret message" - enc = self.alice.encrypt(msg) + def _test_encryption(self, message): + enc = self.alice.encrypt(message) self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) - self.assertEquals(dec, msg) + self.assertEquals(dec, message) + + def test_encrypt_simple_message(self): + self._test_encryption('This is a simple message.') + + def test_encrypt_message_with_newlines_at_end(self): + self._test_encryption('This message has a newline at the end.\n') + + def test_encrypt_many_newlines_at_end(self): + self._test_encryption('Message with lotsa newlines.\n\n\n') + + def test_encrypt_newlines_inside_message(self): + self._test_encryption('Message\nwith\ninterior\nnewlines.') + + def test_encrypt_with_leading_newlines(self): + self._test_encryption('\n\nMessage with leading newlines.') def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1d8678ce2..ce84c8652 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1190,30 +1190,20 @@ class SimpleDH(object): mpi = M2Crypto.m2.bn_to_mpi(bn) return mpi - def _run_ssl(self, text, which): - base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s ' - '-nosalt %(dec_flag)s') - if which.lower()[0] == 'd': - dec_flag = ' -d' - # When decoding base64, we need to make sure there's a - # single '\n' at the end of the base64 encoded data. 
- # It's kinda dumb that openssl wants to see a newline - text = text.strip('\n') + '\n' - else: - dec_flag = '' - shared = self._shared - cmd = base_cmd % locals() - proc = _runproc(cmd) + def _run_ssl(self, subcommand, text): + proc = _runproc('openssl %s' % subcommand) proc.stdin.write(text) proc.stdin.close() proc.wait() err = proc.stderr.read() if err: raise RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read().strip('\n') + return proc.stdout.read() def encrypt(self, text): - return self._run_ssl(text, 'enc') + cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt' % self._shared + return self._run_ssl(cmd, text).strip('\n') def decrypt(self, text): - return self._run_ssl(text, 'dec') + cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt -d' % self._shared + return self._run_ssl(cmd, text) -- cgit From 28b19b9e20100236f98e04cc43bcf106768ff2bb Mon Sep 17 00:00:00 2001 From: "Dave Walker (Daviey)" Date: Fri, 27 May 2011 15:28:10 +0100 Subject: nova/auth/novarc.template: Changed NOVA_KEY_DIR to allow symlink support --- nova/auth/novarc.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index cda2ecc28..8170fcafe 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,4 @@ -NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null) +NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" -- cgit From f6d847cd867c09319f9fc451c09dc7322542e26b Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:40:50 -0400 Subject: prevent encryption from adding newlines on long messages --- nova/tests/test_xenapi.py | 4 ++++ nova/virt/xenapi/vmops.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 0632d05a5..fe37f0ebe 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -594,6 +594,7 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def _test_encryption(self, message): enc = self.alice.encrypt(message) + print enc self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, message) @@ -613,6 +614,9 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): def test_encrypt_with_leading_newlines(self): self._test_encryption('\n\nMessage with leading newlines.') + def test_encrypt_really_long_message(self): + self._test_encryption(''.join(['abcd' for i in xrange(1024)])) + def tearDown(self): super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index ce84c8652..1fcaaeede 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1201,7 +1201,7 @@ class SimpleDH(object): return proc.stdout.read() def encrypt(self, text): - cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt' % self._shared + cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt' % self._shared return self._run_ssl(cmd, text).strip('\n') def decrypt(self, text): -- cgit From 60a291747eeded09ade608088eae47fdb300a56b Mon Sep 17 00:00:00 2001 From: Mark Washenberger Date: Fri, 27 May 2011 10:41:12 -0400 Subject: remove errant print statement --- nova/tests/test_xenapi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index fe37f0ebe..9d56c1644 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -594,7 +594,6 @@ 
class XenAPIDiffieHellmanTestCase(test.TestCase): def _test_encryption(self, message): enc = self.alice.encrypt(message) - print enc self.assertFalse(enc.endswith('\n')) dec = self.bob.decrypt(enc) self.assertEquals(dec, message) -- cgit From 1af3ac5f60bb9a4ad201f0bd84a355235be2f354 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 19:50:57 +0000 Subject: fixed so all the new encryption tests pass.. including data with newlines and so forth --- nova/virt/xenapi/vmops.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1fcaaeede..e116ef2d1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1190,8 +1190,12 @@ class SimpleDH(object): mpi = M2Crypto.m2.bn_to_mpi(bn) return mpi - def _run_ssl(self, subcommand, text): - proc = _runproc('openssl %s' % subcommand) + def _run_ssl(self, text, extra_args=None): + if not extra_args: + extra_args = '' + cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt %s' % ( + self._shared, extra_args) + proc = _runproc('openssl %s' % cmd) proc.stdin.write(text) proc.stdin.close() proc.wait() @@ -1201,9 +1205,9 @@ class SimpleDH(object): return proc.stdout.read() def encrypt(self, text): - cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt' % self._shared - return self._run_ssl(cmd, text).strip('\n') + return self._run_ssl(text).strip('\n') def decrypt(self, text): - cmd = 'enc -aes-128-cbc -a -A -pass pass:%s -nosalt -d' % self._shared - return self._run_ssl(cmd, text) + if text[len(text)-1:] != '\n': + text = text + '\n' + return self._run_ssl(text, '-d') -- cgit From cb42d3ec2c358a1666fde06d4252d1d76baeffff Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 20:29:48 +0000 Subject: added -A back in to pass to openssl --- nova/virt/xenapi/vmops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index e116ef2d1..389c27598 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1193,7 +1193,7 @@ class SimpleDH(object): def _run_ssl(self, text, extra_args=None): if not extra_args: extra_args = '' - cmd = 'enc -aes-128-cbc -a -pass pass:%s -nosalt %s' % ( + cmd = 'enc -aes-128-cbc -A -a -pass pass:%s -nosalt %s' % ( self._shared, extra_args) proc = _runproc('openssl %s' % cmd) proc.stdin.write(text) -- cgit From 2e4fca0b2a8dc4295d14a337ffa2771fab857420 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 27 May 2011 16:31:18 -0400 Subject: now pip-requires mox version 0.5.3 --- tools/pip-requires | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/pip-requires b/tools/pip-requires index 8f8018765..f1c5b2003 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -17,8 +17,7 @@ redis==2.0.0 routes==1.12.3 WebOb==0.9.8 wsgiref==0.1.2 -mox==0.5.0 --f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz +mox==0.5.3 greenlet==0.3.1 nose bzr -- cgit From 132d0579a11b5f3b0be930e5a9369205cb282e35 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 27 May 2011 20:48:57 +0000 Subject: added \n is not needed with -A --- nova/virt/xenapi/vmops.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 389c27598..2b3fb6a39 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1208,6 +1208,4 @@ class SimpleDH(object): return self._run_ssl(text).strip('\n') def decrypt(self, text): - if text[len(text)-1:] != '\n': - text = text + '\n' 
return self._run_ssl(text, '-d') -- cgit
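Taken together, the vmops.py commits above converge on a single openssl invocation for SimpleDH: 'enc -aes-128-cbc -A -a -pass pass:<shared> -nosalt', with '-d' appended for decryption. Because '-A' keeps the base64 ciphertext on one line, the earlier newline stripping and padding around stdin is no longer needed; encrypt() only strips the trailing newline that openssl itself prints. The sketch below shows that final behaviour in isolation, assuming a plain shared-secret string and direct subprocess calls rather than the _runproc helper and the M2Crypto key exchange used in vmops.py:

    import subprocess

    def _openssl(args, text):
        # Feed `text` to "openssl <args>" on stdin and return stdout,
        # raising if openssl reports anything on stderr.
        proc = subprocess.Popen(['openssl'] + args,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(text)
        if err:
            raise RuntimeError('OpenSSL error: %s' % err)
        return out

    def encrypt(shared, text):
        # -a emits base64; -A keeps it on a single line, so only the
        # final newline printed by openssl needs stripping.
        return _openssl(['enc', '-aes-128-cbc', '-A', '-a',
                         '-pass', 'pass:%s' % shared, '-nosalt'],
                        text).strip('\n')

    def decrypt(shared, text):
        # With -A used for both directions, the ciphertext can be passed
        # back verbatim; no newline massaging is needed before -d.
        return _openssl(['enc', '-aes-128-cbc', '-A', '-a',
                         '-pass', 'pass:%s' % shared, '-nosalt', '-d'],
                        text)

With this shape, round-tripping messages that contain leading, trailing, or embedded newlines, as well as long messages (the cases added to XenAPIDiffieHellmanTestCase), works because the plaintext reaches stdin untouched and the base64 never wraps.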