From ffac2aa8162ba5111a01b495d9dd7e43bfda4af4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 14:38:37 -0500 Subject: initial fudging in of swap disk --- nova/tests/xenapi/stubs.py | 2 +- nova/virt/xenapi/vm_utils.py | 18 ++++++++++++------ nova/virt/xenapi/vmops.py | 21 +++++++++++++-------- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 12 +++++++++--- 4 files changed, 35 insertions(+), 18 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 4833ccb07..d9306900d 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -37,7 +37,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return vdi_uuid + return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9f6cd608c..c24fc7ba6 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -408,18 +408,24 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) - vdi_uuid = session.wait_for_task(task, instance_id) + vdi_uuids = session.wait_for_task(task, instance_id) + primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid') cls.scan_sr(session, instance_id, sr_ref) # Set the name-label to ease debugging - vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) - name_label = get_name_label_for_image(image) - session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) + primary_vdi_ref = session.get_xenapi().VDI.get_by_uuid(primary_vdi_uuid) + primary_name_label = get_name_label_for_image(image) + session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") + LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(primary_vdi_uuid)s") % locals()) - return vdi_uuid + + LOG.debug("=" * 100) + LOG.debug(primary_vdi_uuid) + LOG.debug(swap_vdi_uuid) + return (primary_vdi_uuid, swap_vdi_uuid) @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0074444f8..4a01cac29 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -109,20 +109,20 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - vdi_uuid = VMHelper.fetch_image(self._session, instance.id, - instance.image_id, user, project, disk_image_type) - return vdi_uuid + (primary_vdi_uuid, swap_vdi_uuid) = VMHelper.fetch_image(self._session, + instance.id, instance.image_id, user, project, disk_image_type) + return (primary_vdi_uuid, swap_vdi_uuid) def spawn(self, instance, network_info=None): - vdi_uuid = self._create_disk(instance) - vm_ref = self._create_vm(instance, vdi_uuid, network_info) + vdi_uuid, swap_uuid = self._create_disk(instance) + vm_ref = self._create_vm(instance, vdi_uuid, swap_uuid, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuid, network_info=None): + def _create_vm(self, instance, vdi_uuid, swap_vdi_uuid=None, network_info=None): """Create VM instance."""
instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -143,18 +143,20 @@ class VMOps(object): # Are we building from a pre-existing disk? vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + if swap_vdi_uuid: + swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) + instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)[0] ramdisk = None if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) + instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)[0] use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, vdi_ref, disk_image_type, instance.os_type) @@ -163,6 +165,9 @@ class VMOps(object): VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=vdi_ref, userdevice=0, bootable=True) + if swap_vdi_uuid: + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, + vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 4b45671ae..9d6ee78ab 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -177,8 +177,15 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): else: assert_vhd_not_hidden(base_copy_path) + # If we find a swap.vhd, go ahead and copy it into the SR + swap_uuid = None + orig_swap_path = os.path.join(staging_path, 'swap.vhd') + if os.path.exists(orig_swap_path): + swap_path, swap_uuid = rename_with_uuid(orig_swap_path) + move_into_sr(swap_path) + move_into_sr(base_copy_path) - return vdi_uuid + return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=swap_uuid) def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): @@ -324,8 +331,7 @@ def download_vhd(session, args): try: _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port) - vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) - return vdi_uuid + return _fixup_vhds(sr_path, staging_path, uuid_stack) finally: _cleanup_staging_area(staging_path) -- cgit From 94766fac0f5fdb3c7847b1129a8f05948a97f887 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 20:42:54 +0000 Subject: cleanup and fixes --- nova/virt/xenapi/vm_utils.py | 18 +++++----- nova/virt/xenapi/vmops.py | 40 +++++++++++++--------- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 12 ++++--- 3 files changed, 42 insertions(+), 28 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c24fc7ba6..f1f7b8249 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -410,7 +410,7 @@ class VMHelper(HelperBase): task = session.async_call_plugin('glance', 'download_vhd', kwargs) vdi_uuids = session.wait_for_task(task, instance_id) primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') - swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid') + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) cls.scan_sr(session, instance_id, sr_ref) @@ -419,13 +419,14 @@ class VMHelper(HelperBase): primary_name_label = get_name_label_for_image(image) 
session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label) - LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(primary_vdi_uuid)s") - % locals()) + LOG.debug(_("xapi 'download_vhd' returned VDI UUID " + "%(primary_vdi_uuid)s") % locals()) + if swap_vdi_uuid: + LOG.debug(_("xapi 'download_vhd' returned SWAP VDI UUID " + "%(swap_vdi_uuid)s") % locals()) - LOG.debug("=" * 100) - LOG.debug(primary_vdi_uuid) - LOG.debug(swap_vdi_uuid) - return (primary_vdi_uuid, swap_vdi_uuid) + LOG.debug("=" * 100) + return vdi_uuids @classmethod def _fetch_image_glance_disk(cls, session, instance_id, image, access, @@ -482,7 +483,8 @@ class VMHelper(HelperBase): LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) return filename else: - return session.get_xenapi().VDI.get_uuid(vdi_ref) + vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref) + return {'primary_vdi_uuid': vdi_uuid} @classmethod def determine_disk_image_type(cls, instance): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 4a01cac29..0c30ad4cb 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -109,20 +109,21 @@ class VMOps(object): user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) - (primary_vdi_uuid, swap_vdi_uuid) = VMHelper.fetch_image(self._session, - instance.id, instance.image_id, user, project, disk_image_type) - return (primary_vdi_uuid, swap_vdi_uuid) + vdi_uuids = VMHelper.fetch_image(self._session, + instance.id, instance.image_id, user, project, + disk_image_type) + return vdi_uuids def spawn(self, instance, network_info=None): - vdi_uuid, swap_uuid = self._create_disk(instance) - vm_ref = self._create_vm(instance, vdi_uuid, swap_uuid, network_info) + vdi_uuids = self._create_disk(instance) + vm_ref = self._create_vm(instance, vdi_uuids, network_info) self._spawn(instance, vm_ref) def spawn_rescue(self, instance): """Spawn a rescue instance.""" self.spawn(instance) - def _create_vm(self, instance, vdi_uuid, swap_vdi_uuid=None, network_info=None): + def _create_vm(self, instance, vdi_uuids, network_info=None): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -142,30 +143,37 @@ class VMOps(object): project = AuthManager().get_project(instance.project_id) # Are we building from a pre-existing disk?
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', + vdi_uuids['primary_vdi_uuid']) + swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) if swap_vdi_uuid: swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) + else: + swap_vdi_ref = None disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)[0] + instance.kernel_id, user, project, + ImageType.KERNEL_RAMDISK) ramdisk = None if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)[0] + instance.ramdisk_id, user, project, + ImageType.KERNEL_RAMDISK) - use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id, - vdi_ref, disk_image_type, instance.os_type) - vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, - use_pv_kernel) + use_pv_kernel = VMHelper.determine_is_pv(self._session, + instance.id, primary_vdi_ref, disk_image_type, + instance.os_type) + vm_ref = VMHelper.create_vm(self._session, instance, kernel, + ramdisk, use_pv_kernel) VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=vdi_ref, userdevice=0, bootable=True) - if swap_vdi_uuid: + vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) + if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) @@ -177,7 +185,7 @@ class VMOps(object): # Alter the image before VM start for, e.g. network injection if FLAGS.xenapi_inject_image: VMHelper.preconfigure_instance(self._session, instance, - vdi_ref, network_info) + primary_vdi_ref, network_info) self.create_vifs(vm_ref, network_info) self.inject_network_info(instance, network_info, vm_ref) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 9d6ee78ab..6cc7617e0 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -178,15 +178,19 @@ def _fixup_vhds(sr_path, staging_path, uuid_stack): assert_vhd_not_hidden(base_copy_path) # If we find a swap.vhd, go ahead and copy it into the SR - swap_uuid = None + swap_vdi_uuid = None orig_swap_path = os.path.join(staging_path, 'swap.vhd') if os.path.exists(orig_swap_path): - swap_path, swap_uuid = rename_with_uuid(orig_swap_path) + swap_path, swap_vdi_uuid = rename_with_uuid(orig_swap_path) move_into_sr(swap_path) - move_into_sr(base_copy_path) - return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=swap_uuid) + vdi_uuids = {} + vdi_uuids['primary_vdi_uuid'] = vdi_uuid + if swap_vdi_uuid: + vdi_uuids['swap_vdi_uuid'] = swap_vdi_uuid + move_into_sr(base_copy_path) + return vdi_uuids def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): """Hard-link VHDs into staging area with appropriate filename -- cgit From 42c209d90f491d19b3aabc70f8dafc33b76cf20d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 16:51:28 -0500 Subject: fix tests, have glance plugin return json encoded string of vdi uuids --- nova/tests/xenapi/stubs.py | 11 +++++++++-- nova/virt/xenapi/vm_utils.py | 6 +++++- nova/virt/xenapi/vmops.py | 4 ++-- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 6 +++++- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/nova/tests/xenapi/stubs.py 
b/nova/tests/xenapi/stubs.py index d9306900d..9f6f64318 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -17,6 +17,7 @@ """Stubouts, mocks and fixtures for the test suite""" import eventlet +import json from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -37,7 +38,7 @@ def stubout_instance_snapshot(stubs): sr_ref=sr_ref, sharable=False) vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) vdi_uuid = vdi_rec['uuid'] - return dict(primary_vdi_uuid=vdi_uuid, swap_vdi_uuid=None) + return {'primary_vdi_uuid': vdi_uuid} stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) @@ -132,10 +133,16 @@ class FakeSessionForVMTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVMTests, self).__init__(uri) - def host_call_plugin(self, _1, _2, _3, _4, _5): + def host_call_plugin(self, _1, _2, plugin, method, _5): sr_ref = fake.get_all('SR')[0] vdi_ref = fake.create_vdi('', False, sr_ref, False) vdi_rec = fake.get_record('VDI', vdi_ref) + if plugin == "glance" and method == "download_vhd": + swap_vdi_ref = fake.create_vdi('', False, sr_ref, False) + swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref) + return '%s' % json.dumps( + {'primary_vdi_uuid': vdi_rec['uuid'], + 'swap_vdi_uuid': swap_vdi_rec['uuid']}) return '%s' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f1f7b8249..3d980013a 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ +import json import os import pickle import re @@ -408,7 +409,8 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) - vdi_uuids = session.wait_for_task(task, instance_id) + result = session.wait_for_task(task, instance_id) + vdi_uuids = json.loads(result) primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid') swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) @@ -571,6 +573,8 @@ class VMHelper(HelperBase): args['raw'] = 'true' task = session.async_call_plugin('objectstore', fn, args) uuid = session.wait_for_task(task, instance_id) + if image_type != ImageType.KERNEL_RAMDISK: + return {'primary_vdi_uuid': uuid} return uuid @classmethod diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0c30ad4cb..0d7ef5fac 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -91,7 +91,7 @@ class VMOps(object): def finish_resize(self, instance, disk_info): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) - vm_ref = self._create_vm(instance, vdi_uuid) + vm_ref = self._create_vm(instance, {'primary_vdi_uuid': vdi_uuid}) self.resize_instance(instance, vdi_uuid) self._spawn(instance, vm_ref) @@ -144,7 +144,7 @@ class VMOps(object): # Are we building from a pre-existing disk? 
primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', - vdi_uuids['primary_vdi_uuid']) + vdi_uuids.get('primary_vdi_uuid')) swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None) if swap_vdi_uuid: swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', swap_vdi_uuid) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 6cc7617e0..0d02adfe9 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -22,6 +22,10 @@ # import httplib +try: + import json +except ImportError: + import simplejson as json import os import os.path import pickle @@ -335,7 +339,7 @@ def download_vhd(session, args): try: _download_tarball(sr_path, staging_path, image_id, glance_host, glance_port) - return _fixup_vhds(sr_path, staging_path, uuid_stack) + return json.dumps(_fixup_vhds(sr_path, staging_path, uuid_stack)) finally: _cleanup_staging_area(staging_path) -- cgit From 038ce7e16ee7ee1afc86ded260c1aa0d40d1e1ad Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 23 May 2011 22:52:56 +0000 Subject: swap should use device 1 and rescue use device 2 --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0d7ef5fac..6ff8fd6a4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -175,7 +175,7 @@ class VMOps(object): vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=0, bootable=False) + vdi_ref=swap_vdi_ref, userdevice=1, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. @@ -711,7 +711,7 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, - vdi_ref, 1, False) + vdi_ref, 2, False) self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) -- cgit From 26842cba90bd5637bd6aa185b300102ff257d9f1 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 24 May 2011 22:39:16 +0000 Subject: move devices back --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6ff8fd6a4..6fff1d494 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -175,7 +175,7 @@ class VMOps(object): vdi_ref=primary_vdi_ref, userdevice=0, bootable=True) if swap_vdi_ref: VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, - vdi_ref=swap_vdi_ref, userdevice=1, bootable=False) + vdi_ref=swap_vdi_ref, userdevice=2, bootable=False) # TODO(tr3buchet) - check to make sure we have network info, otherwise # create it now. This goes away once nova-multi-nic hits. 
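Net effect of this commit and the previous one: the root disk stays on userdevice 0, the rescue disk returns to userdevice 1 (second hunk, below), and the swap disk settles on userdevice 2. A minimal sketch of the resulting VBD layout, using the VMHelper.create_vbd calls as they appear in these diffs; the guest device names in the comments (xvda/xvdb/xvdc) are an assumption about how XenAPI maps userdevice numbers, not something these patches state:

    # Sketch only, not part of any patch in this series.
    VMHelper.create_vbd(session=session, vm_ref=vm_ref,
                        vdi_ref=primary_vdi_ref, userdevice=0, bootable=True)   # root (xvda)
    VMHelper.create_vbd(session=session, vm_ref=vm_ref,
                        vdi_ref=swap_vdi_ref, userdevice=2, bootable=False)     # swap (xvdc)
    # In rescue mode (next hunk), the original VM's root VDI is attached to
    # the rescue VM at userdevice 1 (xvdb), leaving userdevice 2 for swap.
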
@@ -711,7 +711,7 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, - vdi_ref, 2, False) + vdi_ref, 1, False) self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) -- cgit From fdd27860724cd57db6df059a97e98289f88ce6ac Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add support to rpc for multicall --- nova/rpc.py | 99 +++++++++++++++++++++++++++++++++++++------------- nova/tests/test_rpc.py | 17 +++++++++ 2 files changed, 90 insertions(+), 26 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 2116f22c3..04198a4a6 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -32,8 +32,11 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import queue + from nova import context from nova import exception @@ -131,7 +134,8 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch( + no_ack, auto_ack, enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -347,8 +351,9 @@ def _unpack_context(msg): if key.startswith('_context_'): value = msg.pop(key) context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) LOG.debug(_('unpacked context: %s'), context_dict) - return context.RequestContext.from_dict(context_dict) + return RpcContext.from_dict(context_dict) def _pack_context(msg, context): @@ -365,26 +370,27 @@ def _pack_context(msg, context): msg.update(context) -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" LOG.debug(_('Making asynchronous call on %s ...'), topic) msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - class WaitMessage(object): - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self.result = RemoteError(*data['failure']) - else: - self.result = data['result'] - - wait_msg = WaitMessage() conn = Connection.instance() consumer = DirectConsumer(connection=conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) conn = Connection.instance() @@ -392,18 +398,59 @@ def call(context, topic, msg): publisher.send(msg) publisher.close() - try: - consumer.wait(limit=1) - except StopIteration: - pass - consumer.close() - # NOTE(termie): this is a little bit of a change from the original - # non-eventlet code where returning a Failure - # instance from a deferred call is very similar to - # raising an exception - if isinstance(wait_msg.result, Exception): - raise wait_msg.result - return wait_msg.result + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer 
+ self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + # TODO(termie): This is probably really a much simpler issue but am + # trying to solve the problem quickly. This works but + # I'd prefer to dig in and do it the best way later on. + + def _waiter(): + while not self._closed: + try: + self._consumer.wait(limit=1) + except StopIteration: + pass + eventlet.spawn(_waiter) + + while True: + result = self._results.get() + if isinstance(result, Exception): + raise result + if result is None: + self.close() + raise StopIteration + yield result + + +def call(context, topic, msg): + """Sends a message on a topic and waits for a response.""" + rv = multicall(context, topic, msg) + for x in rv: + rv.close() + return x def cast(context, topic, msg): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 44d7c91eb..92ddfcffc 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -49,6 +49,17 @@ class RpcTestCase(test.TestCase): "args": {"value": value}}) self.assertEqual(value, result) + def test_multicall_succeed_three_times(self): + """Get a value through rpc call""" + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + + for x in result: + self.assertEqual(value, x) + def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -126,6 +137,12 @@ class TestReceiver(object): LOG.debug(_("Received %s"), context) return context.to_dict() + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value) + context.reply(value) + @staticmethod def fail(context, value): """Raises an exception with the value sent in""" -- cgit From d46c9fffe4fab8f55483c73d3e6ef12116de9bc5 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: make the test more explicit --- nova/tests/test_rpc.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 92ddfcffc..acab3e758 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -56,9 +56,10 @@ class RpcTestCase(test.TestCase): 'test', {"method": "echo_three_times", "args": {"value": value}}) - + i = 0 for x in result: - self.assertEqual(value, x) + self.assertEqual(value + i, x) + i += 1 def test_context_passed(self): """Makes sure a context is passed through rpc call""" value = 42 @@ -140,8 +141,8 @@ class TestReceiver(object): @staticmethod def echo_three_times(context, value): context.reply(value) - context.reply(value) - context.reply(value) + context.reply(value + 1) + context.reply(value + 2) @staticmethod def fail(context, value): -- cgit From 7622e854ef68fbdbfc531690cf74916301956c8e Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: add commented-out non-working code for yield-based returns --- nova/rpc.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/rpc.py b/nova/rpc.py index 04198a4a6..f43291c4b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -201,6 +201,11 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: + # TODO(termie): re-enable when fix the yielding issue +
#if hasattr(rval, 'send'): + # logging.error('rval! %s', rval) + # for x in rval: + # msg_reply(msg_id, x, None) msg_reply(msg_id, rval, None) except Exception as e: logging.exception('Exception during message handling') -- cgit From b44c1fe9561ee8754137d2700bab295f20a4032b Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add a connection pool for rpc cast/call Use the same rabbit connection for all topic listening and wait to be notified vs doing a 0.1 second poll for each. --- nova/rpc.py | 96 ++++++++++++++++++++++++++++++++++++++++++--------------- nova/service.py | 21 +++++++------ 2 files changed, 84 insertions(+), 33 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index f43291c4b..62590ca92 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,9 +35,9 @@ from carrot import messaging import eventlet from eventlet import greenpool from eventlet import greenthread +from eventlet import pools from eventlet import queue - from nova import context from nova import exception from nova import fakerabbit @@ -92,6 +92,11 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() +class Pool(pools.Pool): + def create(self): + return Connection.instance(new=True) + +ConnectionPool = Pool(max_size=20) class Consumer(messaging.Consumer): """Consumer base class. @@ -163,21 +168,9 @@ class AdapterConsumer(Consumer): self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) super(AdapterConsumer, self).__init__(connection=connection, topic=topic) + self.register_callback(self.process_data) - def receive(self, *args, **kwargs): - self.pool.spawn_n(self._receive, *args, **kwargs) - - @exception.wrap_exception - def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ + def process_data(self, message_data, message): LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -194,6 +187,19 @@ class AdapterConsumer(Consumer): LOG.warn(_('no method for message: %s') % message_data) msg_reply(msg_id, _('No method for message: %s') % message_data) return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception + def _process_data(self, msg_id, ctxt, method, args): + """Magically looks for a method on the proxy object and calls it. 
+ + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ node_func = getattr(self.proxy, str(method)) node_args = dict((str(k), v) for k, v in args.iteritems()) @@ -214,11 +220,6 @@ class AdapterConsumer(Consumer): return -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - class TopicAdapterConsumer(AdapterConsumer): """Consumes messages on a specific topic.""" @@ -251,6 +252,50 @@ class FanoutAdapterConsumer(AdapterConsumer): topic=topic, proxy=proxy) +class ConsumerSet(object): + """Groups consumers to listen on together on a single connection""" + + def __init__(self, conn, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.init(conn) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + while True: + it = self.consumer_set.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + except Exception as e: + LOG.error(_("Received exception %s " % str(e) + \ + "while processing consumer")) + fuck + self.reconnect() + # Break to outer loop + break + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + class TopicPublisher(Publisher): """Publishes messages on a specific topic.""" @@ -315,7 +360,7 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = DirectPublisher(connection=conn, msg_id=msg_id) try: publisher.send({'result': reply, 'failure': failure}) @@ -324,7 +369,9 @@ def msg_reply(msg_id, reply=None, failure=None): {'result': dict((k, repr(v)) for k, v in reply.__dict__.iteritems()), 'failure': failure}) + publisher.close() + ConnectionPool.put(conn) class RemoteError(exception.Error): @@ -393,12 +440,11 @@ def multicall(context, topic, msg): LOG.debug(_('MSG_ID is %s') % (msg_id)) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() consumer = DirectConsumer(connection=conn, msg_id=msg_id) wait_msg = MulticallWaiter(consumer) consumer.register_callback(wait_msg) - conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() @@ -462,10 +508,11 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def fanout_cast(context, topic, msg): @@ -511,6 +558,7 @@ def send_message(topic, message, wait=True): if wait: consumer.wait() + consumer.close() if __name__ == '__main__': diff --git a/nova/service.py b/nova/service.py index ab1238c3b..7761cfef5 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,26 +91,29 @@ class Service(object): if 'nova-compute' == self.binary: 
self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - conn2 = rpc.Connection.instance(new=True) - conn3 = rpc.Connection.instance(new=True) + if self.report_interval: + conn = rpc.Connection.instance(new=True) + + # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=conn, topic=self.topic, proxy=self) consumer_node = rpc.TopicAdapterConsumer( - connection=conn2, + connection=conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn3, + connection=conn, topic=self.topic, proxy=self) - self.timers.append(consumer_all.attach_to_eventlet()) - self.timers.append(consumer_node.attach_to_eventlet()) - self.timers.append(fanout.attach_to_eventlet()) + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + greenthread.spawn_n(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) -- cgit From e1a47584cc63136280cf3ca9ef02da3efc1dff7f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: pep8 and comment fixes --- nova/rpc.py | 25 ++++++++++++++++--------- nova/service.py | 1 - 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 62590ca92..db5aec826 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -92,12 +92,16 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + class Pool(pools.Pool): + """Class that implements a Pool of Connections""" + def create(self): return Connection.instance(new=True) ConnectionPool = Pool(max_size=20) + class Consumer(messaging.Consumer): """Consumer base class. @@ -171,6 +175,16 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): + """Consumer callback that parses the message for validity and + fires off a thread to call the proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -191,14 +205,8 @@ class AdapterConsumer(Consumer): @exception.wrap_exception def _process_data(self, msg_id, ctxt, method, args): - """Magically looks for a method on the proxy object and calls it. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - + """Thread that magically looks for a method on the proxy + object and calls it.
""" node_func = getattr(self.proxy, str(method)) @@ -285,7 +293,6 @@ class ConsumerSet(object): except Exception as e: LOG.error(_("Received exception %s " % str(e) + \ "while processing consumer")) - fuck self.reconnect() # Break to outer loop break diff --git a/nova/service.py b/nova/service.py index 7761cfef5..c51c9b066 100644 --- a/nova/service.py +++ b/nova/service.py @@ -91,7 +91,6 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: conn = rpc.Connection.instance(new=True) -- cgit From d0be426d4e7bbfb1ecb3f078c71c1e176da441a5 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: convert fanout_cast to ConnectionPool --- nova/rpc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index db5aec826..fdb228695 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -526,10 +526,11 @@ def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = Connection.instance() + conn = ConnectionPool.get() publisher = FanoutPublisher(topic, connection=conn) publisher.send(msg) publisher.close() + ConnectionPool.put(conn) def generic_response(message_data, message): -- cgit From f2c2a593c828fc86e298d3eb31672a09b498c41f Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fakerabbit's declare_consumer should support more than 1 consumer. also: make fakerabbit Backend.consume be an iterator like it should be.. --- nova/fakerabbit.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a7dee8caf..a29ba9d86 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -77,6 +77,10 @@ class Queue(object): class Backend(base.BaseBackend): + def __init__(self, connection, **kwargs): + super(Backend, self).__init__(connection, **kwargs) + self.consumers = [] + def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: @@ -97,16 +101,20 @@ class Backend(base.BaseBackend): EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) def declare_consumer(self, queue, callback, *args, **kwargs): - self.current_queue = queue - self.current_callback = callback + self.consumers.append((queue, callback)) def consume(self, limit=None): + num = 0 while True: - item = self.get(self.current_queue) - if item: - self.current_callback(item) - raise StopIteration() - greenthread.sleep(0) + for (queue, callback) in self.consumers: + item = self.get(queue) + if item: + callback(item) + num += 1 + yield + if limit and num == limit: + raise StopIteration() + greenthread.sleep(0.1) def get(self, queue, no_ack=False): global QUEUES -- cgit From 90e30806a2e0c235612eb09792656cd861997f84 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 25 May 2011 15:42:24 -0700 Subject: fix consumers to actually be deleted and clean up cloud test --- nova/fakerabbit.py | 13 +++++++++---- nova/rpc.py | 13 ++++++++++--- nova/service.py | 8 +++----- nova/tests/test_cloud.py | 26 ++++++++++---------------- 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index a29ba9d86..5f3e75c48 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -79,7 +79,7 @@ class Queue(object): class Backend(base.BaseBackend): def __init__(self, connection, **kwargs): super(Backend, 
self).__init__(connection, **kwargs) - self.consumers = [] + self.consumers = {} def queue_declare(self, queue, **kwargs): global QUEUES @@ -100,13 +100,18 @@ class Backend(base.BaseBackend): ' key %(routing_key)s') % locals()) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) - def declare_consumer(self, queue, callback, *args, **kwargs): - self.consumers.append((queue, callback)) + def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs): + LOG.debug("Adding consumer %s", consumer_tag) + self.consumers[consumer_tag] = (queue, callback) + + def cancel(self, consumer_tag): + LOG.debug("Removing consumer %s", consumer_tag) + del self.consumers[consumer_tag] def consume(self, limit=None): num = 0 while True: - for (queue, callback) in self.consumers: + for (queue, callback) in self.consumers.itervalues(): item = self.get(queue) if item: callback(item) diff --git a/nova/rpc.py b/nova/rpc.py index fdb228695..e2e962fcc 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -30,11 +30,11 @@ import time import traceback import uuid +import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool -from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -266,6 +266,7 @@ class ConsumerSet(object): def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) self.consumer_set = None + self.enabled = True self.init(conn) def init(self, conn): @@ -283,15 +284,21 @@ class ConsumerSet(object): self.init(None) def wait(self, limit=None): - while True: + running = True + while running: it = self.consumer_set.iterconsume(limit=limit) + if not it: + break while True: try: it.next() except StopIteration: return + except greenlet.GreenletExit: + running = False + break except Exception as e: - LOG.error(_("Received exception %s " % str(e) + \ + LOG.error(_("Received exception %s " % type(e) + \ "while processing consumer")) self.reconnect() # Break to outer loop diff --git a/nova/service.py b/nova/service.py index c51c9b066..a0ff7c9f3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,12 +21,8 @@ import inspect import os -import sys -import time -from eventlet import event from eventlet import greenthread -from eventlet import greenpool from nova import context from nova import db @@ -112,7 +108,7 @@ class Service(object): consumer_node, fanout]) # Wait forever, processing these consumers - greenthread.spawn_n(cset.wait) + self.csetthread = greenthread.spawn(cset.wait) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -169,6 +165,8 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" + self.csetthread.kill() + self.csetthread.wait() self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 54c0454de..1e14c327c 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,13 +17,8 @@ # under the License. 
from base64 import b64decode -import json from M2Crypto import BIO from M2Crypto import RSA -import os -import shutil -import tempfile -import time from eventlet import greenthread @@ -33,12 +28,10 @@ from nova import db from nova import flags from nova import log as logging from nova import rpc -from nova import service from nova import test from nova import utils from nova import exception from nova.auth import manager -from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local @@ -79,6 +72,15 @@ class CloudTestCase(test.TestCase): self.stubs.Set(local.LocalImageService, 'show', fake_show) self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish + rpc_cast = rpc.cast + + def finish_cast(*args, **kwargs): + rpc_cast(*args, **kwargs) + greenthread.sleep(0.2) + + self.stubs.Set(rpc, 'cast', finish_cast) + def tearDown(self): network_ref = db.project_get_network(self.context, self.project.id) @@ -113,7 +115,6 @@ class CloudTestCase(test.TestCase): self.cloud.describe_addresses(self.context) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) db.floating_ip_destroy(self.context, address) def test_associate_disassociate_address(self): @@ -129,12 +130,10 @@ class CloudTestCase(test.TestCase): self.cloud.associate_address(self.context, instance_id=ec2_id, public_ip=address) - greenthread.sleep(0.3) self.cloud.disassociate_address(self.context, public_ip=address) self.cloud.release_address(self.context, public_ip=address) - greenthread.sleep(0.3) self.network.deallocate_fixed_ip(self.context, fixed) db.instance_destroy(self.context, inst['id']) db.floating_ip_destroy(self.context, address) @@ -306,31 +305,26 @@ class CloudTestCase(test.TestCase): 'instance_type': instance_type, 'max_count': max_count} rv = self.cloud.run_instances(self.context, **kwargs) - greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_ajax_console(self): + kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] - greenthread.sleep(0.3) output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url) # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. - greenthread.sleep(0.3) rv = self.cloud.terminate_instances(self.context, [instance_id]) - greenthread.sleep(0.3) def test_key_generation(self): result = self._create_key('test') -- cgit From 8f2557dcd3e3d88c0eabb63bcce90ced79347ae4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: catch greenlet.GreenletExit when shutting service down --- nova/rpc.py | 2 +- nova/service.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index e2e962fcc..02052ecf5 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -24,13 +24,13 @@ No fan-out support yet. 
""" +import greenlet import json import sys import time import traceback import uuid -import greenlet from carrot import connection as carrot_connection from carrot import messaging import eventlet diff --git a/nova/service.py b/nova/service.py index a0ff7c9f3..c7e48544c 100644 --- a/nova/service.py +++ b/nova/service.py @@ -19,6 +19,7 @@ """Generic Node baseclass for all workers that run on hosts.""" +import greenlet import inspect import os @@ -166,7 +167,10 @@ class Service(object): def kill(self): """Destroy the service object in the datastore.""" self.csetthread.kill() - self.csetthread.wait() + try: + self.csetthread.wait() + except greenlet.GreenletExit: + pass self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) -- cgit From 5f3adfc3110ed8095cdac43cc651aa46087c5490 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Always create Service consumers no matter if report_interval is 0 Fix tests to handle how Service loads Consumers now --- nova/service.py | 46 +++++++++++++++++++------------------ nova/tests/test_service.py | 57 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 28 deletions(-) diff --git a/nova/service.py b/nova/service.py index c7e48544c..3a364b6c6 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,29 +88,31 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - if self.report_interval: - conn = rpc.Connection.instance(new=True) - - # Share this same connection for these Consumers - consumer_all = rpc.TopicAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - consumer_node = rpc.TopicAdapterConsumer( - connection=conn, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - fanout = rpc.FanoutAdapterConsumer( - connection=conn, - topic=self.topic, - proxy=self) - - cset = rpc.ConsumerSet(conn, [consumer_all, - consumer_node, - fanout]) - # Wait forever, processing these consumers - self.csetthread = greenthread.spawn(cset.wait) + conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % \ + self.topic) + + # Share this same connection for these Consumers + consumer_all = rpc.TopicAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + consumer_node = rpc.TopicAdapterConsumer( + connection=conn, + topic='%s.%s' % (self.topic, self.host), + proxy=self) + fanout = rpc.FanoutAdapterConsumer( + connection=conn, + topic=self.topic, + proxy=self) + + cset = rpc.ConsumerSet(conn, [consumer_all, + consumer_node, + fanout]) + # Wait forever, processing these consumers + self.csetthread = greenthread.spawn(cset.wait) + if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) self.timers.append(pulse) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index d48de2057..0bba01d92 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -106,7 +106,10 @@ class ServiceTestCase(test.TestCase): # NOTE(vish): Create was moved out of mox replay to make sure that # the looping calls are created in StartService. 
- app = service.Service.create(host=host, binary=binary) + app = service.Service.create(host=host, binary=binary, topic=topic) + + self.mox.StubOutWithMock(service.rpc.Connection, 'instance') + service.rpc.Connection.instance(new=mox.IgnoreArg()) self.mox.StubOutWithMock(rpc, 'TopicAdapterConsumer', @@ -114,6 +117,11 @@ class ServiceTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'FanoutAdapterConsumer', use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), topic=topic, proxy=mox.IsA(service.Service)).AndReturn( @@ -129,9 +137,13 @@ class ServiceTestCase(test.TestCase): proxy=mox.IsA(service.Service)).AndReturn( rpc.FanoutAdapterConsumer) - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.TopicAdapterConsumer.attach_to_eventlet() - rpc.FanoutAdapterConsumer.attach_to_eventlet() + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) service_create = {'host': host, 'binary': binary, @@ -287,8 +299,41 @@ class ServiceTestCase(test.TestCase): # Creating mocks self.mox.StubOutWithMock(service.rpc.Connection, 'instance') service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) - service.rpc.Connection.instance(new=mox.IgnoreArg()) + + self.mox.StubOutWithMock(rpc, + 'TopicAdapterConsumer', + use_mock_anything=True) + self.mox.StubOutWithMock(rpc, + 'FanoutAdapterConsumer', + use_mock_anything=True) + + self.mox.StubOutWithMock(rpc, + 'ConsumerSet', + use_mock_anything=True) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), + topic='%s.%s' % (topic, host), + proxy=mox.IsA(service.Service)).AndReturn( + rpc.TopicAdapterConsumer) + + rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.FanoutAdapterConsumer) + + def wait_func(self, limit=None): + return None + + mock_cset = self.mox.CreateMock(rpc.ConsumerSet, + {'wait': wait_func}) + rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset) + wait_func(mox.IgnoreArg()) + self.mox.StubOutWithMock(serv.manager.driver, 'update_available_resource') serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) -- cgit From 11d3672ad655c39265e5d2477a30db3a12adc65c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: Add rpc_conn_pool_size flag for the new connection pool --- nova/rpc.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 02052ecf5..82869fc46 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -50,7 +50,10 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -99,7 +102,7 @@ class Pool(pools.Pool): def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=20) +ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) class Consumer(messaging.Consumer): -- cgit From 
b193b97054f11664a72cd53547f355d1c9044f88 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 25 May 2011 15:42:24 -0700 Subject: connection pool tests and make the pool LIFO --- nova/rpc.py | 8 +++++++- nova/tests/test_rpc.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 82869fc46..3cc0dadd4 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -99,10 +99,16 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): """Class that implements a Pool of Connections""" + # TODO(comstud): Timeout connections not used in a while def create(self): return Connection.instance(new=True) -ConnectionPool = Pool(max_size=FLAGS.rpc_conn_pool_size) +# Create a ConnectionPool to use for RPC calls. We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) class Consumer(messaging.Consumer): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index acab3e758..f64209596 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -120,6 +120,48 @@ class RpcTestCase(test.TestCase): "value": value}}) self.assertEqual(value, result) + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection""" + + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_connectionpool_double(self): + """Test that ConnectionPool returns 2 separate connections + when called consecutively and the pool returns connections LIFO + """ + + conn1 = rpc.ConnectionPool.get() + conn2 = rpc.ConnectionPool.get() + + self.assertNotEqual(conn1, conn2) + rpc.ConnectionPool.put(conn1) + rpc.ConnectionPool.put(conn2) + + conn3 = rpc.ConnectionPool.get() + conn4 = rpc.ConnectionPool.get() + self.assertEqual(conn2, conn3) + self.assertEqual(conn1, conn4) + + def test_connectionpool_limit(self): + """Test connection pool limit and verify all connections + are unique + """ + + max_size = FLAGS.rpc_conn_pool_size + conns = [] + + for i in xrange(max_size): + conns.append(rpc.ConnectionPool.get()) + + self.assertFalse(rpc.ConnectionPool.free_items) + self.assertEqual(rpc.ConnectionPool.current_size, + rpc.ConnectionPool.max_size) + self.assertEqual(len(set(conns)), max_size) + class TestReceiver(object): """Simple Proxy class so the consumer has methods to call -- cgit From 51e8eeb9b3a23f811bcbf52d9700d94c5c8b15e4 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:42:24 -0700 Subject: bring back commits lost in merge --- nova/rpc.py | 107 +++++++++++++++++++++++++++++-------------------- nova/tests/test_rpc.py | 19 +++++++++ 2 files changed, 82 insertions(+), 44 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index 3cc0dadd4..d7d7bb014 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -35,6 +35,7 @@ from carrot import connection as carrot_connection from carrot import messaging import eventlet from eventlet import greenpool +from eventlet import greenthread from eventlet import pools from eventlet import queue @@ -140,30 +141,30 @@ class Consumer(messaging.Consumer): FLAGS.rabbit_max_retries) sys.exit(1) - def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connection.""" - # TODO(vish): the logic for failed connections and 
From 51e8eeb9b3a23f811bcbf52d9700d94c5c8b15e4 Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:42:24 -0700
Subject: bring back commits lost in merge

---
 nova/rpc.py            | 107 +++++++++++++++++++++++++++++--------------------
 nova/tests/test_rpc.py |  19 +++++++++
 2 files changed, 82 insertions(+), 44 deletions(-)

diff --git a/nova/rpc.py b/nova/rpc.py
index 3cc0dadd4..d7d7bb014 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -35,6 +35,7 @@ from carrot import connection as carrot_connection
 from carrot import messaging
 import eventlet
 from eventlet import greenpool
+from eventlet import greenthread
 from eventlet import pools
 from eventlet import queue

@@ -140,30 +141,30 @@ class Consumer(messaging.Consumer):
                           FLAGS.rabbit_max_retries)
             sys.exit(1)

-    def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
-        """Wraps the parent fetch with some logic for failed connection."""
-        # TODO(vish): the logic for failed connections and logging should be
-        #             refactored into some sort of connection manager object
-        try:
-            if self.failed_connection:
-                # NOTE(vish): connection is defined in the parent class, we can
-                #             recreate it as long as we create the backend too
-                # pylint: disable=W0201
-                self.connection = Connection.recreate()
-                self.backend = self.connection.create_backend()
-                self.declare()
-            return super(Consumer, self).fetch(
-                    no_ack, auto_ack, enable_callbacks)
-            if self.failed_connection:
-                LOG.error(_('Reconnected to queue'))
-                self.failed_connection = False
-        # NOTE(vish): This is catching all errors because we really don't
-        #             want exceptions to be logged 10 times a second if some
-        #             persistent failure occurs.
-        except Exception, e:  # pylint: disable=W0703
-            if not self.failed_connection:
-                LOG.exception(_('Failed to fetch message from queue: %s' % e))
-                self.failed_connection = True
+    #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
+    #    """Wraps the parent fetch with some logic for failed connection."""
+    #    # TODO(vish): the logic for failed connections and logging should be
+    #    #             refactored into some sort of connection manager object
+    #    try:
+    #        if self.failed_connection:
+    #            # NOTE(vish): connection is defined in the parent class, we can
+    #            #             recreate it as long as we create the backend too
+    #            # pylint: disable=W0201
+    #            self.connection = Connection.recreate()
+    #            self.backend = self.connection.create_backend()
+    #            self.declare()
+    #        return super(Consumer, self).fetch(
+    #                no_ack, auto_ack, enable_callbacks)
+    #        if self.failed_connection:
+    #            LOG.error(_('Reconnected to queue'))
+    #            self.failed_connection = False
+    #    # NOTE(vish): This is catching all errors because we really don't
+    #    #             want exceptions to be logged 10 times a second if some
+    #    #             persistent failure occurs.
+    #    except Exception, e:  # pylint: disable=W0703
+    #        if not self.failed_connection:
+    #            LOG.exception(_('Failed to fetch message from queue: %s' % e))
+    #            self.failed_connection = True

     def attach_to_eventlet(self):
         """Only needed for unit tests!"""

@@ -195,7 +196,7 @@ class AdapterConsumer(Consumer):
         """
         LOG.debug(_('received %s') % message_data)

-        msg_id = message_data.pop('_msg_id', None)
+        msg_id = message_data.get('_msg_id', None)

         ctxt = _unpack_context(message_data)

@@ -225,11 +226,14 @@ class AdapterConsumer(Consumer):
             rval = node_func(context=ctxt, **node_args)
             if msg_id:
                 # TODO(termie): re-enable when fix the yielding issue
-                #if hasattr(rval, 'send'):
-                #    logging.error('rval! %s', rval)
-                #    for x in rval:
-                #        msg_reply(msg_id, x, None)
-                msg_reply(msg_id, rval, None)
+                if hasattr(rval, 'send'):
+                    logging.error('rval! %s', rval)
+                    for x in rval:
+                        msg_reply(msg_id, x, None)
+                    msg_reply(msg_id, None, None)
+                else:
+                    msg_reply(msg_id, rval, None)
+                #msg_reply(msg_id, rval, None)
         except Exception as e:
             logging.exception('Exception during message handling')
             if msg_id:

@@ -355,7 +359,7 @@ class DirectConsumer(Consumer):
         self.routing_key = msg_id
         self.exchange = msg_id
         self.auto_delete = True
-        self.exclusive = True
+        self.exclusive = False
         super(DirectConsumer, self).__init__(connection=connection)

@@ -387,7 +391,9 @@ def msg_reply(msg_id, reply=None, failure=None):
     publisher = DirectPublisher(connection=conn, msg_id=msg_id)
     try:
         publisher.send({'result': reply, 'failure': failure})
+        LOG.error('MSG REPLY SUCCESS')
     except TypeError:
+        LOG.error('MSG REPLY FAILURE')
         publisher.send(
                 {'result': dict((k, repr(v))
                         for k, v in reply.__dict__.iteritems()),

@@ -440,9 +446,9 @@ def _pack_context(msg, context):
     for args at some point.

     """
-    context = dict([('_context_%s' % key, value)
-                    for (key, value) in context.to_dict().iteritems()])
-    msg.update(context)
+    context_d = dict([('_context_%s' % key, value)
+                      for (key, value) in context.to_dict().iteritems()])
+    msg.update(context_d)

@@ -463,12 +469,13 @@ def multicall(context, topic, msg):
     LOG.debug(_('MSG_ID is %s') % (msg_id))
     _pack_context(msg, context)

-    conn = ConnectionPool.get()
-    consumer = DirectConsumer(connection=conn, msg_id=msg_id)
+    con_conn = ConnectionPool.get()
+    consumer = DirectConsumer(connection=con_conn, msg_id=msg_id)
     wait_msg = MulticallWaiter(consumer)
     consumer.register_callback(wait_msg)

-    publisher = TopicPublisher(connection=conn, topic=topic)
+    pub_conn = ConnectionPool.get()
+    publisher = TopicPublisher(connection=pub_conn, topic=topic)
     publisher.send(msg)
     publisher.close()

@@ -484,6 +491,7 @@ class MulticallWaiter(object):
     def close(self):
         self._closed = True
         self._consumer.close()
+        ConnectionPool.put(self._consumer.connection)

     def __call__(self, data, message):
         """Acks message and sets result."""

@@ -501,15 +509,26 @@ class MulticallWaiter(object):
         #              trying to solve the problem quickly. This works but
         #              I'd prefer to dig in and do it the best way later on.

-        def _waiter():
-            while not self._closed:
-                try:
-                    self._consumer.wait(limit=1)
-                except StopIteration:
-                    pass
-        eventlet.spawn(_waiter)
+        #def _waiter():
+        #    i = 0
+        #    while not self._closed:
+        #        LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag)
+        #        i += 1
+        #        try:
+        #            self._consumer.wait(limit=1)
+        #        except StopIteration:
+        #            pass
+        #    self._consumer.close()
+        #    ConnectionPool.put(self._consumer.connection)
+        #eventlet.spawn(_waiter)

         while True:
+            rv = None
+            while rv is None and not self._closed:
+                rv = self._consumer.fetch(enable_callbacks=True)
+                time.sleep(0.01)
+
+            LOG.error('RV %s', rv)
             result = self._results.get()
             if isinstance(result, Exception):
                 raise result

diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index f64209596..e5d99474d 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -61,6 +61,18 @@ class RpcTestCase(test.TestCase):
             self.assertEqual(value + i, x)
             i += 1

+    def test_multicall_succeed_three_times_yield(self):
+        """Get a value through rpc call"""
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo_three_times_yield",
+                                "args": {"value": value}})
+        i = 0
+        for x in result:
+            self.assertEqual(value + i, x)
+            i += 1
+
     def test_context_passed(self):
         """Makes sure a context is passed through rpc call"""
         value = 42
@@ -83,6 +95,7 @@ class RpcTestCase(test.TestCase):
                                      'test',
                                      {"method": "fail",
                                       "args": {"value": value}})
+        LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN')
         try:
             rpc.call(self.context,
                      'test',
@@ -186,6 +199,12 @@ class TestReceiver(object):
         context.reply(value + 1)
         context.reply(value + 2)

+    @staticmethod
+    def echo_three_times_yield(context, value):
+        yield value
+        yield value + 1
+        yield value + 2
+
     @staticmethod
     def fail(context, value):
         """Raises an exception with the value sent in"""
-- cgit
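The reply convention introduced above, stream each value a generator yields as
its own reply message and then send a final None so the caller knows the call
is finished, can be sketched without any AMQP plumbing. This is an illustrative
reduction only (the send callable stands in for msg_reply):

    def reply_all(rval, send):
        # Generators expose a send() method; that is the duck-type test
        # the patch uses to detect them.
        if hasattr(rval, 'send'):
            for item in rval:
                send(item)
            send(None)   # end-of-results marker for multicall
        else:
            send(rval)

    replies = []

    def echo_three_times_yield(value):
        yield value
        yield value + 1
        yield value + 2

    reply_all(echo_three_times_yield(42), replies.append)
    assert replies == [42, 43, 44, None]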
From 64b13a2aad676d2310947e3bf8b9e3dde6b763e7 Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:42:24 -0700
Subject: almost everything working with fake_rabbit

---
 nova/rpc.py              | 16 +++++++++++++++-
 nova/service.py          | 22 ++++++++++++++++------
 nova/test.py             |  1 +
 nova/tests/test_cloud.py |  4 ++--
 run_tests.py             |  1 +
 5 files changed, 35 insertions(+), 9 deletions(-)

diff --git a/nova/rpc.py b/nova/rpc.py
index d7d7bb014..e1f594a99 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -102,6 +102,7 @@ class Pool(pools.Pool):

     # TODO(comstud): Timeout connections not used in a while
     def create(self):
+        LOG.debug('Creating new connection')
         return Connection.instance(new=True)

 # Create a ConnectionPool to use for RPC calls.  We'll order the
@@ -166,6 +167,10 @@ class Consumer(messaging.Consumer):
     #        if not self.failed_connection:
     #            LOG.exception(_('Failed to fetch message from queue: %s' % e))
     #            self.failed_connection = True

+    def close(self, *args, **kwargs):
+        LOG.debug('Closing consumer %s', self.consumer_tag)
+        return super(Consumer, self).close(*args, **kwargs)
+
     def attach_to_eventlet(self):
         """Only needed for unit tests!"""
         timer = utils.LoopingCall(self.fetch, enable_callbacks=True)
@@ -317,6 +322,8 @@ class ConsumerSet(object):
                     # Break to outer loop
                     break

+    def close(self):
+        self.consumer_set.close()

 class Publisher(messaging.Publisher):
     """Publisher base class."""
@@ -525,12 +532,19 @@ class MulticallWaiter(object):
         while True:
             rv = None
             while rv is None and not self._closed:
-                rv = self._consumer.fetch(enable_callbacks=True)
+                try:
+                    rv = self._consumer.fetch(enable_callbacks=True)
+                except Exception:
+                    self.close()
+                    raise
+                #rv = self._consumer.fetch(enable_callbacks=True)
                 time.sleep(0.01)

             LOG.error('RV %s', rv)
             result = self._results.get()
+            LOG.error('RESULT %s', result)
             if isinstance(result, Exception):
+                self.close()
                 raise result
             if result == None:
                 self.close()

diff --git a/nova/service.py b/nova/service.py
index 3a364b6c6..2da510140 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -88,29 +88,39 @@ class Service(object):
         if 'nova-compute' == self.binary:
             self.manager.update_available_resource(ctxt)

-        conn = rpc.Connection.instance(new=True)
+        conn1 = rpc.Connection.instance(new=True)
+        conn2 = rpc.Connection.instance(new=True)
+        conn3 = rpc.Connection.instance(new=True)
         logging.debug("Creating Consumer connection for Service %s" % \
                 self.topic)

         # Share this same connection for these Consumers
         consumer_all = rpc.TopicAdapterConsumer(
-                connection=conn,
+                connection=conn1,
                 topic=self.topic,
                 proxy=self)
         consumer_node = rpc.TopicAdapterConsumer(
-                connection=conn,
+                connection=conn1,
                 topic='%s.%s' % (self.topic, self.host),
                 proxy=self)
         fanout = rpc.FanoutAdapterConsumer(
-                connection=conn,
+                connection=conn1,
                 topic=self.topic,
                 proxy=self)

-        cset = rpc.ConsumerSet(conn, [consumer_all,
+        cset = rpc.ConsumerSet(conn1, [consumer_all,
                                        consumer_node,
                                        fanout])

         # Wait forever, processing these consumers
-        self.csetthread = greenthread.spawn(cset.wait)
+        def _wait():
+            cset.wait()
+            cset.close()
+
+        self.csetthread = greenthread.spawn(_wait)
+
+        #self.timers.append(consumer_all.attach_to_eventlet())
+        #self.timers.append(consumer_node.attach_to_eventlet())
+        #self.timers.append(fanout.attach_to_eventlet())

         if self.report_interval:
             pulse = utils.LoopingCall(self.report_state)
             pulse.start(interval=self.report_interval, now=False)

diff --git a/nova/test.py b/nova/test.py
index 4deb2a175..7b2cf94b6 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -85,6 +85,7 @@ class TestCase(unittest.TestCase):
         self._monkey_patch_attach()
         self._monkey_patch_wsgi()
         self._original_flags = FLAGS.FlagValuesDict()
+        rpc.ConnectionPool = rpc.Pool(max_size=30)

     def tearDown(self):
         """Runs after each test method to tear down test environment."""

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 1e14c327c..a838dd530 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -87,8 +87,8 @@ class CloudTestCase(test.TestCase):
         db.network_disassociate(self.context, network_ref['id'])
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
-        self.compute.kill()
-        self.network.kill()
+        #self.compute.kill()
+        #self.network.kill()
         super(CloudTestCase, self).tearDown()

     def _create_key(self, name):

diff --git a/run_tests.py b/run_tests.py
index d5d8acd16..509a60caa 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -285,6 +285,7 @@ if __name__ == '__main__':
     # If any argument looks like a test name but doesn't have "nova.tests" in
     # front of it, automatically add that so we don't have to type as much
     argv = []
+    logging.getLogger('amqplib').setLevel(logging.DEBUG)
     for x in sys.argv:
         if x.startswith('test_'):
             argv.append('nova.tests.%s' % x)
-- cgit

From e3a88390fd62308cde3d4c597d653c8dc245bed4 Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:42:24 -0700
Subject: don't need to use a separate connection

---
 nova/rpc.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/nova/rpc.py b/nova/rpc.py
index e1f594a99..a212383fd 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -481,8 +481,7 @@ def multicall(context, topic, msg):
     wait_msg = MulticallWaiter(consumer)
     consumer.register_callback(wait_msg)

-    pub_conn = ConnectionPool.get()
-    publisher = TopicPublisher(connection=pub_conn, topic=topic)
+    publisher = TopicPublisher(connection=con_conn, topic=topic)
     publisher.send(msg)
     publisher.close()
-- cgit
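The practical effect of the change above is that one multicall now pins a
single pooled connection instead of two; MulticallWaiter.close() is what
eventually returns it. A toy pool makes the accounting visible (sketch only;
the plain objects stand in for broker connections):

    import eventlet.pools

    class ConnPool(eventlet.pools.Pool):
        def create(self):
            return object()

    pool = ConnPool(max_size=4)

    conn = pool.get()        # consumer and publisher now share this
    assert len(pool.free_items) == 0
    pool.put(conn)           # done by MulticallWaiter.close()
    assert len(pool.free_items) == 1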
From c9b21b0619891c069251c568e4d89be791af56c3 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Wed, 25 May 2011 15:42:24 -0700
Subject: lots of fixes for rpc and extra imports

---
 nova/fakerabbit.py                          | 12 +++--
 nova/rpc.py                                 | 68 +++++++++++------------
 nova/service.py                             |  9 ++--
 nova/test.py                                |  8 ++--
 nova/tests/integrated/integrated_helpers.py |  5 +--
 5 files changed, 45 insertions(+), 57 deletions(-)

diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index 5f3e75c48..ff993e29a 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit")

 EXCHANGES = {}
 QUEUES = {}
+CONSUMERS = {}

 class Message(base.BaseMessage):
@@ -101,17 +102,20 @@ class Backend(base.BaseBackend):
         EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)

     def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs):
+        global CONSUMERS
         LOG.debug("Adding consumer %s", consumer_tag)
-        self.consumers[consumer_tag] = (queue, callback)
+        CONSUMERS[consumer_tag] = (queue, callback)

     def cancel(self, consumer_tag):
+        global CONSUMERS
         LOG.debug("Removing consumer %s", consumer_tag)
-        del self.consumers[consumer_tag]
+        del CONSUMERS[consumer_tag]

     def consume(self, limit=None):
+        global CONSUMERS
         num = 0
         while True:
-            for (queue, callback) in self.consumers.itervalues():
+            for (queue, callback) in CONSUMERS.itervalues():
                 item = self.get(queue)
                 if item:
                     callback(item)
@@ -147,5 +151,7 @@ class Backend(base.BaseBackend):
 def reset_all():
     global EXCHANGES
     global QUEUES
+    global CONSUMERS
     EXCHANGES = {}
     QUEUES = {}
+    CONSUMERS = {}

diff --git a/nova/rpc.py b/nova/rpc.py
index a212383fd..7faed4d3a 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -33,9 +33,7 @@ import uuid

 from carrot import connection as carrot_connection
 from carrot import messaging
-import eventlet
 from eventlet import greenpool
-from eventlet import greenthread
 from eventlet import pools
 from eventlet import queue

@@ -142,30 +140,30 @@ class Consumer(messaging.Consumer):
                           FLAGS.rabbit_max_retries)
             sys.exit(1)

-    #def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
-    #    """Wraps the parent fetch with some logic for failed connection."""
-    #    # TODO(vish): the logic for failed connections and logging should be
-    #    #             refactored into some sort of connection manager object
-    #    try:
-    #        if self.failed_connection:
-    #            # NOTE(vish): connection is defined in the parent class, we can
-    #            #             recreate it as long as we create the backend too
-    #            # pylint: disable=W0201
-    #            self.connection = Connection.recreate()
-    #            self.backend = self.connection.create_backend()
-    #            self.declare()
-    #        return super(Consumer, self).fetch(
-    #                no_ack, auto_ack, enable_callbacks)
-    #        if self.failed_connection:
-    #            LOG.error(_('Reconnected to queue'))
-    #            self.failed_connection = False
-    #    # NOTE(vish): This is catching all errors because we really don't
-    #    #             want exceptions to be logged 10 times a second if some
-    #    #             persistent failure occurs.
-    #    except Exception, e:  # pylint: disable=W0703
-    #        if not self.failed_connection:
-    #            LOG.exception(_('Failed to fetch message from queue: %s' % e))
-    #            self.failed_connection = True
+    def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
+        """Wraps the parent fetch with some logic for failed connection."""
+        # TODO(vish): the logic for failed connections and logging should be
+        #             refactored into some sort of connection manager object
+        try:
+            if self.failed_connection:
+                # NOTE(vish): connection is defined in the parent class, we can
+                #             recreate it as long as we create the backend too
+                # pylint: disable=W0201
+                self.connection = Connection.recreate()
+                self.backend = self.connection.create_backend()
+                self.declare()
+            return super(Consumer, self).fetch(
+                    no_ack, auto_ack, enable_callbacks)
+            if self.failed_connection:
+                LOG.error(_('Reconnected to queue'))
+                self.failed_connection = False
+        # NOTE(vish): This is catching all errors because we really don't
+        #             want exceptions to be logged 10 times a second if some
+        #             persistent failure occurs.
+        except Exception, e:  # pylint: disable=W0703
+            if not self.failed_connection:
+                LOG.exception(_('Failed to fetch message from queue: %s' % e))
+                self.failed_connection = True

     def close(self, *args, **kwargs):
         LOG.debug('Closing consumer %s', self.consumer_tag)
@@ -325,6 +323,7 @@ class ConsumerSet(object):

     def close(self):
         self.consumer_set.close()
+

 class Publisher(messaging.Publisher):
     """Publisher base class."""
     pass
@@ -511,23 +510,6 @@ class MulticallWaiter(object):
         return self.wait()

     def wait(self):
-        # TODO(termie): This is probably really a much simpler issue but am
-        #              trying to solve the problem quickly. This works but
-        #              I'd prefer to dig in and do it the best way later on.
-
-        #def _waiter():
-        #    i = 0
-        #    while not self._closed:
-        #        LOG.error('Iteration #%s (%s)', i, self._consumer.consumer_tag)
-        #        i += 1
-        #        try:
-        #            self._consumer.wait(limit=1)
-        #        except StopIteration:
-        #            pass
-        #    self._consumer.close()
-        #    ConnectionPool.put(self._consumer.connection)
-        #eventlet.spawn(_waiter)
-
         while True:
             rv = None
             while rv is None and not self._closed:

diff --git a/nova/service.py b/nova/service.py
index 2da510140..2626c49ae 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -89,8 +89,6 @@ class Service(object):
             self.manager.update_available_resource(ctxt)

         conn1 = rpc.Connection.instance(new=True)
-        conn2 = rpc.Connection.instance(new=True)
-        conn3 = rpc.Connection.instance(new=True)
         logging.debug("Creating Consumer connection for Service %s" % \
                 self.topic)
@@ -111,10 +109,13 @@ class Service(object):
         cset = rpc.ConsumerSet(conn1, [consumer_all,
                                        consumer_node,
                                        fanout])
+
         # Wait forever, processing these consumers
         def _wait():
-            cset.wait()
-            cset.close()
+            try:
+                cset.wait()
+            finally:
+                cset.close()

         self.csetthread = greenthread.spawn(_wait)

diff --git a/nova/test.py b/nova/test.py
index 7b2cf94b6..df48afbb1 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -31,17 +31,15 @@ import uuid
 import unittest

 import mox
-import shutil
 import stubout
 from eventlet import greenthread

-from nova import context
-from nova import db
 from nova import fakerabbit
 from nova import flags
 from nova import rpc
 from nova import service
 from nova import wsgi
+from nova.virt import fake

 FLAGS = flags.FLAGS
@@ -100,6 +98,10 @@ class TestCase(unittest.TestCase):
         if FLAGS.fake_rabbit:
             fakerabbit.reset_all()

+        if FLAGS.connection_type == 'fake':
+            if hasattr(fake.FakeConnection, '_instance'):
+                del fake.FakeConnection._instance
+
         # Reset any overriden flags
         self.reset_flags()

diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index bc98921f0..7f590441e 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -154,10 +154,7 @@ class _IntegratedTestBase(test.TestCase):
         # set up services
         self.start_service('compute')
         self.start_service('volume')
-        # NOTE(justinsb): There's a bug here which is eluding me...
-        # If we start the network_service, all is good, but then subsequent
-        # tests fail: CloudTestCase.test_ajax_console in particular.
-        #self.start_service('network')
+        self.start_service('network')
         self.start_service('scheduler')

         self._start_api_service()
-- cgit
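Hoisting the consumer registry to module level means every Backend instance
(one per fake connection) sees every registered callback, which is how a real
broker routes across connections. The registry pattern in isolation, as a
runnable sketch (not the full fakerabbit module):

    # Module-level registry shared by all Backend instances.
    CONSUMERS = {}

    class Backend(object):
        def declare_consumer(self, queue, callback, consumer_tag):
            CONSUMERS[consumer_tag] = (queue, callback)

        def cancel(self, consumer_tag):
            CONSUMERS.pop(consumer_tag, None)

    def reset_all():
        # Mirrors fakerabbit.reset_all(): tests must clear shared state.
        CONSUMERS.clear()

    a, b = Backend(), Backend()
    a.declare_consumer('q1', lambda msg: None, 'tag1')
    assert 'tag1' in CONSUMERS    # visible through any instance
    b.cancel('tag1')
    assert not CONSUMERS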
From 9334d41c6fe638a3119327702094695cfbd38271 Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:42:25 -0700
Subject: make sure that using multicall on a call with a single result still
 functions

---
 nova/rpc.py            |  4 ++--
 nova/tests/test_rpc.py | 29 +++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/nova/rpc.py b/nova/rpc.py
index 7faed4d3a..84493271f 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -233,10 +233,10 @@ class AdapterConsumer(Consumer):
                     logging.error('rval! %s', rval)
                     for x in rval:
                         msg_reply(msg_id, x, None)
-                    msg_reply(msg_id, None, None)
                 else:
                     msg_reply(msg_id, rval, None)
-                #msg_reply(msg_id, rval, None)
+                # This final None tells multicall that it is done.
+                msg_reply(msg_id, None, None)
         except Exception as e:
             logging.exception('Exception during message handling')
             if msg_id:

diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index e5d99474d..c1ef60ff6 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -49,6 +49,35 @@ class RpcTestCase(test.TestCase):
                                                  "args": {"value": value}})
         self.assertEqual(value, result)

+    def test_call_succeed_despite_multiple_returns(self):
+        """Get a value through rpc call"""
+        value = 42
+        result = rpc.call(self.context, 'test', {"method": "echo_three_times",
+                                                 "args": {"value": value}})
+        self.assertEqual(value, result)
+
+    def test_call_succeed_despite_multiple_returns_yield(self):
+        """Get a value through rpc call"""
+        value = 42
+        result = rpc.call(self.context, 'test',
+                          {"method": "echo_three_times_yield",
+                           "args": {"value": value}})
+        self.assertEqual(value, result)
+
+    def test_multicall_succeed_once(self):
+        """Get a value through rpc call"""
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo",
+                                "args": {"value": value}})
+        i = 0
+        for x in result:
+            if i > 0:
+                self.fail('should only receive one response')
+            self.assertEqual(value + i, x)
+            i += 1
+
     def test_multicall_succeed_three_times(self):
         """Get a value through rpc call"""
         value = 42
-- cgit
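With the sentinel moved outside the if/else, every call path now terminates
the stream the same way: N value replies followed by one None. The whole
protocol fits in a few lines of plain Python (a deque stands in for the reply
queue; the helper names are invented for illustration):

    from collections import deque

    def serve(func, kwargs, replies):
        rval = func(**kwargs)
        if hasattr(rval, 'send'):            # generator: stream each item
            for x in rval:
                replies.append(x)
        else:                                # plain return: single item
            replies.append(rval)
        replies.append(None)                 # final None: call is done

    def iter_results(replies):
        while True:
            item = replies.popleft()
            if item is None:
                return
            yield item

    replies = deque()

    def echo(value):
        return value

    serve(echo, {'value': 42}, replies)
    assert list(iter_results(replies)) == [42]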
""" -import greenlet import json import sys import time @@ -36,6 +35,7 @@ from carrot import messaging from eventlet import greenpool from eventlet import pools from eventlet import queue +import greenlet from nova import context from nova import exception @@ -50,9 +50,9 @@ LOG = logging.getLogger('nova.rpc') FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') + 'Size of RPC thread pool') flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') + 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): @@ -96,7 +96,7 @@ class Connection(carrot_connection.BrokerConnection): class Pool(pools.Pool): - """Class that implements a Pool of Connections""" + """Class that implements a Pool of Connections.""" # TODO(comstud): Timeout connections not used in a while def create(self): @@ -152,8 +152,9 @@ class Consumer(messaging.Consumer): self.connection = Connection.recreate() self.backend = self.connection.create_backend() self.declare() - return super(Consumer, self).fetch( - no_ack, auto_ack, enable_callbacks) + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) if self.failed_connection: LOG.error(_('Reconnected to queue')) self.failed_connection = False @@ -165,10 +166,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def close(self, *args, **kwargs): - LOG.debug('Closing consumer %s', self.consumer_tag) - return super(Consumer, self).close(*args, **kwargs) - def attach_to_eventlet(self): """Only needed for unit tests!""" timer = utils.LoopingCall(self.fetch, enable_callbacks=True) @@ -188,8 +185,10 @@ class AdapterConsumer(Consumer): self.register_callback(self.process_data) def process_data(self, message_data, message): - """Consumer callback that parses the message for validity and - fires off a thread to call the proxy object method. + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. Message data should be a dictionary with two keys: method: string representing the method to call @@ -199,8 +198,8 @@ class AdapterConsumer(Consumer): """ LOG.debug(_('received %s') % message_data) + # This will be popped off in _unpack_context msg_id = message_data.get('_msg_id', None) - ctxt = _unpack_context(message_data) method = message_data.get('method') @@ -228,13 +227,13 @@ class AdapterConsumer(Consumer): try: rval = node_func(context=ctxt, **node_args) if msg_id: - # TODO(termie): re-enable when fix the yielding issue + # Check if the result was a generator if hasattr(rval, 'send'): - logging.error('rval! %s', rval) for x in rval: msg_reply(msg_id, x, None) else: msg_reply(msg_id, rval, None) + # This final None tells multicall that it is done. 
msg_reply(msg_id, None, None) except Exception as e: @@ -277,7 +276,7 @@ class FanoutAdapterConsumer(AdapterConsumer): class ConsumerSet(object): - """Groups consumers to listen on together on a single connection""" + """Groups consumers to listen on together on a single connection.""" def __init__(self, conn, consumer_list): self.consumer_list = set(consumer_list) @@ -365,7 +364,7 @@ class DirectConsumer(Consumer): self.routing_key = msg_id self.exchange = msg_id self.auto_delete = True - self.exclusive = False + self.exclusive = True super(DirectConsumer, self).__init__(connection=connection) @@ -393,20 +392,18 @@ def msg_reply(msg_id, reply=None, failure=None): LOG.error(_("Returning exception %s to caller"), message) LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) - conn = ConnectionPool.get() - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - LOG.error('MSG REPLY SUCCESS') - except TypeError: - LOG.error('MSG REPLY FAILURE') - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() class RemoteError(exception.Error): @@ -518,12 +515,9 @@ class MulticallWaiter(object): except Exception: self.close() raise - #rv = self._consumer.fetch(enable_callbacks=True) time.sleep(0.01) - LOG.error('RV %s', rv) result = self._results.get() - LOG.error('RESULT %s', result) if isinstance(result, Exception): self.close() raise result @@ -545,22 +539,20 @@ def cast(context, topic, msg): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() def fanout_cast(context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) _pack_context(msg, context) - conn = ConnectionPool.get() - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() - ConnectionPool.put(conn) + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() def generic_response(message_data, message): diff --git a/nova/service.py b/nova/service.py index 2626c49ae..94afd5f78 100644 --- a/nova/service.py +++ b/nova/service.py @@ -88,27 +88,27 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - conn1 = rpc.Connection.instance(new=True) - logging.debug("Creating Consumer connection for Service %s" % \ - self.topic) + self.conn = rpc.Connection.instance(new=True) + logging.debug("Creating Consumer connection for Service %s" % + self.topic) # Share this same connection for these Consumers consumer_all = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) 
consumer_node = rpc.TopicAdapterConsumer( - connection=conn1, + connection=self.conn, topic='%s.%s' % (self.topic, self.host), proxy=self) fanout = rpc.FanoutAdapterConsumer( - connection=conn1, + connection=self.conn, topic=self.topic, proxy=self) - cset = rpc.ConsumerSet(conn1, [consumer_all, - consumer_node, - fanout]) + cset = rpc.ConsumerSet(self.conn, [consumer_all, + consumer_node, + fanout]) # Wait forever, processing these consumers def _wait(): @@ -119,10 +119,6 @@ class Service(object): self.csetthread = greenthread.spawn(_wait) - #self.timers.append(consumer_all.attach_to_eventlet()) - #self.timers.append(consumer_node.attach_to_eventlet()) - #self.timers.append(fanout.attach_to_eventlet()) - if self.report_interval: pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) @@ -185,6 +181,7 @@ class Service(object): except greenlet.GreenletExit: pass self.stop() + rpc.ConnectionPool.put(self.conn) try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: diff --git a/nova/test.py b/nova/test.py index df48afbb1..80b2d0a74 100644 --- a/nova/test.py +++ b/nova/test.py @@ -83,7 +83,7 @@ class TestCase(unittest.TestCase): self._monkey_patch_attach() self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() - rpc.ConnectionPool = rpc.Pool(max_size=30) + rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size) def tearDown(self): """Runs after each test method to tear down test environment.""" diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a838dd530..ca3ef7ffe 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -87,8 +87,6 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) - #self.compute.kill() - #self.network.kill() super(CloudTestCase, self).tearDown() def _create_key(self, name): @@ -314,7 +312,6 @@ class CloudTestCase(test.TestCase): rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index c1ef60ff6..fcecfb352 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -124,7 +124,6 @@ class RpcTestCase(test.TestCase): 'test', {"method": "fail", "args": {"value": value}}) - LOG.error('INNNNNNN BETTTWWWWWWWWWWEEEEEEEEEEN') try: rpc.call(self.context, 'test', diff --git a/run_tests.py b/run_tests.py index 509a60caa..d5d8acd16 100644 --- a/run_tests.py +++ b/run_tests.py @@ -285,7 +285,6 @@ if __name__ == '__main__': # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much argv = [] - logging.getLogger('amqplib').setLevel(logging.DEBUG) for x in sys.argv: if x.startswith('test_'): argv.append('nova.tests.%s' % x) -- cgit From 7755bbfc7b16248dab23bfab479d09501519290f Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 25 May 2011 15:43:04 -0700 Subject: cleanups --- nova/tests/test_rpc.py | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index fcecfb352..8523b409c 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -31,7 +31,6 @@ LOG = 
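ConnectionPool.item() used above is eventlet's built-in context-manager
accessor: it checks an item out of the pool and guarantees it is put back even
if the block raises, which is the get/put-in-a-finally pattern the previous
code spelled out by hand. Standalone:

    import eventlet.pools

    class ConnPool(eventlet.pools.Pool):
        def create(self):
            return object()

    pool = ConnPool(max_size=1)

    with pool.item() as conn:
        # conn is checked out here; it is returned on exit, whether or
        # not the body raises.
        pass

    assert len(pool.free_items) == 1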
From 7755bbfc7b16248dab23bfab479d09501519290f Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:43:04 -0700
Subject: cleanups

---
 nova/tests/test_rpc.py | 47 +++++++++++++++++++++--------------------------
 1 file changed, 21 insertions(+), 26 deletions(-)

diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index fcecfb352..8523b409c 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -31,7 +31,6 @@ LOG = logging.getLogger('nova.tests.rpc')

 class RpcTestCase(test.TestCase):
-    """Test cases for rpc"""
     def setUp(self):
         super(RpcTestCase, self).setUp()
         self.conn = rpc.Connection.instance(True)
@@ -43,21 +42,18 @@ class RpcTestCase(test.TestCase):
         self.context = context.get_admin_context()

     def test_call_succeed(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.call(self.context, 'test', {"method": "echo",
                                                  "args": {"value": value}})
         self.assertEqual(value, result)

     def test_call_succeed_despite_multiple_returns(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.call(self.context, 'test', {"method": "echo_three_times",
                                                  "args": {"value": value}})
         self.assertEqual(value, result)

     def test_call_succeed_despite_multiple_returns_yield(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.call(self.context, 'test',
                           {"method": "echo_three_times_yield",
@@ -65,7 +61,6 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(value, result)

     def test_multicall_succeed_once(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.multicall(self.context,
                                'test',
@@ -79,7 +74,6 @@ class RpcTestCase(test.TestCase):
             i += 1

     def test_multicall_succeed_three_times(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.multicall(self.context,
                                'test',
@@ -91,7 +85,6 @@ class RpcTestCase(test.TestCase):
             i += 1

     def test_multicall_succeed_three_times_yield(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.multicall(self.context,
                                'test',
@@ -103,7 +96,7 @@ class RpcTestCase(test.TestCase):
             i += 1

     def test_context_passed(self):
-        """Makes sure a context is passed through rpc call"""
+        """Makes sure a context is passed through rpc call."""
         value = 42
         result = rpc.call(self.context,
                           'test', {"method": "context",
@@ -111,11 +104,12 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(self.context.to_dict(), result)

     def test_call_exception(self):
-        """Test that exception gets passed back properly
+        """Test that exception gets passed back properly.

         rpc.call returns a RemoteError object.  The value of the
         exception is converted to a string, so we convert it back
         to an int in the test.
+
         """
         value = 42
         self.assertRaises(rpc.RemoteError,
@@ -134,7 +128,7 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(int(exc.value), value)

     def test_nested_calls(self):
-        """Test that we can do an rpc.call inside another call"""
+        """Test that we can do an rpc.call inside another call."""
         class Nested(object):
             @staticmethod
             def echo(context, queue, value):
@@ -162,8 +156,7 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(value, result)

     def test_connectionpool_single(self):
-        """Test that ConnectionPool recycles a single connection"""
-
+        """Test that ConnectionPool recycles a single connection."""
         conn1 = rpc.ConnectionPool.get()
         rpc.ConnectionPool.put(conn1)
         conn2 = rpc.ConnectionPool.get()
@@ -171,10 +164,13 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(conn1, conn2)

     def test_connectionpool_double(self):
-        """Test that ConnectionPool returns 2 separate connections
-        when called consecutively and the pool returns connections LIFO
-        """
+        """Test that ConnectionPool returns and reuses separate connections.
+
+        When called consecutively we should get separate connections and upon
+        returning them those connections should be reused for future calls
+        before generating a new connection.

+        """
         conn1 = rpc.ConnectionPool.get()
         conn2 = rpc.ConnectionPool.get()

@@ -184,14 +180,11 @@ class RpcTestCase(test.TestCase):

         conn3 = rpc.ConnectionPool.get()
         conn4 = rpc.ConnectionPool.get()
-        self.assertEqual(conn2, conn3)
-        self.assertEqual(conn1, conn4)
+        self.assertEqual(conn1, conn3)
+        self.assertEqual(conn2, conn4)

     def test_connectionpool_limit(self):
-        """Test connection pool limit and verify all connections
-        are unique
-        """
-
+        """Test connection pool limit and connection uniqueness."""
         max_size = FLAGS.rpc_conn_pool_size
         conns = []

@@ -205,19 +198,21 @@ class RpcTestCase(test.TestCase):

 class TestReceiver(object):
-    """Simple Proxy class so the consumer has methods to call
+    """Simple Proxy class so the consumer has methods to call.
+
+    Uses static methods because we aren't actually storing any state.

-    Uses static methods because we aren't actually storing any state"""
+    """

     @staticmethod
     def echo(context, value):
-        """Simply returns whatever value is sent in"""
+        """Simply returns whatever value is sent in."""
         LOG.debug(_("Received %s"), value)
         return value

     @staticmethod
     def context(context, value):
-        """Returns dictionary version of context"""
+        """Returns dictionary version of context."""
         LOG.debug(_("Received %s"), context)
         return context.to_dict()

@@ -235,5 +230,5 @@ class TestReceiver(object):

     @staticmethod
     def fail(context, value):
-        """Raises an exception with the value sent in"""
+        """Raises an exception with the value sent in."""
         raise Exception(value)
-- cgit

From f56df190ee888ae731740e7e949fb6f0c012d687 Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:43:04 -0700
Subject: replace removed import

---
 nova/tests/test_cloud.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index ca3ef7ffe..b64be662e 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -19,6 +19,7 @@
 from base64 import b64decode

 from M2Crypto import BIO
 from M2Crypto import RSA
+import os

 from eventlet import greenthread
-- cgit

From b3506a471bbce063d72aead211f45d693bda7853 Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:43:04 -0700
Subject: don't put connection back in pool

---
 nova/service.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/nova/service.py b/nova/service.py
index 94afd5f78..141fd4253 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -181,7 +181,6 @@ class Service(object):
         except greenlet.GreenletExit:
             pass
         self.stop()
-        rpc.ConnectionPool.put(self.conn)
         try:
             db.service_destroy(context.get_admin_context(), self.service_id)
         except exception.NotFound:
-- cgit

From a05e8e7587e42633e8459fd050eee3a4da247330 Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:43:04 -0700
Subject: move consumerset killing into stop

---
 nova/service.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/nova/service.py b/nova/service.py
index 141fd4253..782183322 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -175,11 +175,6 @@ class Service(object):

     def kill(self):
         """Destroy the service object in the datastore."""
-        self.csetthread.kill()
-        try:
-            self.csetthread.wait()
-        except greenlet.GreenletExit:
-            pass
         self.stop()
         try:
             db.service_destroy(context.get_admin_context(), self.service_id)
@@ -187,6 +182,11 @@ class Service(object):
             logging.warn(_('Service killed that has no database entry'))

     def stop(self):
+        self.csetthread.kill()
+        try:
+            self.csetthread.wait()
+        except greenlet.GreenletExit:
+            pass
         for x in self.timers:
             try:
                 x.stop()
-- cgit
From feb04f0117450bcd6e8f4966f4487575073be41c Mon Sep 17 00:00:00 2001
From: termie
Date: Wed, 25 May 2011 15:43:04 -0700
Subject: change the behavior of calling a multicall

---
 nova/rpc.py            | 8 +++++---
 nova/tests/test_rpc.py | 4 ++--
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/nova/rpc.py b/nova/rpc.py
index 8d14494f0..493978e57 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -236,6 +236,9 @@ class AdapterConsumer(Consumer):

                 # This final None tells multicall that it is done.
                 msg_reply(msg_id, None, None)
+            elif hasattr(rval, 'send'):
+                # NOTE(vish): this iterates through the generator
+                list(rval)
         except Exception as e:
             logging.exception('Exception during message handling')
             if msg_id:
@@ -530,9 +533,8 @@ class MulticallWaiter(object):
 def call(context, topic, msg):
     """Sends a message on a topic and wait for a response."""
     rv = multicall(context, topic, msg)
-    for x in rv:
-        rv.close()
-        return x
+    # NOTE(vish): return the last result from the multicall
+    return list(rv)[-1]

diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 8523b409c..35f4a64d9 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -51,14 +51,14 @@ class RpcTestCase(test.TestCase):
         value = 42
         result = rpc.call(self.context, 'test', {"method": "echo_three_times",
                                                  "args": {"value": value}})
-        self.assertEqual(value, result)
+        self.assertEqual(value + 2, result)

     def test_call_succeed_despite_multiple_returns_yield(self):
         value = 42
         result = rpc.call(self.context, 'test',
                           {"method": "echo_three_times_yield",
                            "args": {"value": value}})
-        self.assertEqual(value, result)
+        self.assertEqual(value + 2, result)

     def test_multicall_succeed_once(self):
         value = 42
-- cgit
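The caller-side consequence: rpc.call() now drains the whole multicall and
returns only the final value, which is why the updated tests expect value + 2
from the echo_three_times methods. In pure-Python terms (a sketch that also
includes the empty-result guard added in the review-fixes commit further
below):

    def call_from_multicall(results):
        # Mirrors rpc.call above: consume everything, keep the last.
        results = list(results)
        return results[-1] if results else None

    assert call_from_multicall(iter([42, 43, 44])) == 44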
From 899642030dd60541153ccee810d082816f92dd49 Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Thu, 26 May 2011 19:27:27 +0000
Subject: Change the return from glance to be a list of dictionaries describing
 VDIs

Fix the rest of the code to account for this
Add a test for swap

---
 nova/tests/test_xenapi.py                          | 23 +++++
 nova/tests/xenapi/stubs.py                         | 23 +++--
 nova/virt/xenapi/fake.py                           |  5 +-
 nova/virt/xenapi/vm_utils.py                       | 49 +++++++----
 nova/virt/xenapi/vmops.py                          | 52 ++++++------
 plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 97 ++++++++++++++--------
 6 files changed, 168 insertions(+), 81 deletions(-)

diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index be1e35697..18a267896 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -395,6 +395,29 @@ class XenAPIVMTestCase(test.TestCase):
                          os_type="linux")
         self.check_vm_params_for_linux()

+    def test_spawn_vhd_glance_swapdisk(self):
+        # Change the default host_call_plugin to one that'll return
+        # a swap disk
+        orig_func = stubs.FakeSessionForVMTests.host_call_plugin
+
+        stubs.FakeSessionForVMTests.host_call_plugin = \
+                stubs.FakeSessionForVMTests.host_call_plugin_swap
+
+        try:
+            # We'll steal the above glance linux test
+            self.test_spawn_vhd_glance_linux()
+        finally:
+            # Make sure to put this back
+            stubs.FakeSessionForVMTests.host_call_plugin = orig_func
+
+        # We should have 2 VBDs.
+        self.assertEqual(len(self.vm['VBDs']), 2)
+        # Now test that we have 1.
+        self.tearDown()
+        self.setUp()
+        self.test_spawn_vhd_glance_linux()
+        self.assertEqual(len(self.vm['VBDs']), 1)
+
     def test_spawn_vhd_glance_windows(self):
         FLAGS.xenapi_image_service = 'glance'
         self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,

diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 9f6f64318..35308d95f 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -38,7 +38,7 @@ def stubout_instance_snapshot(stubs):
             sr_ref=sr_ref, sharable=False)
         vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
         vdi_uuid = vdi_rec['uuid']
-        return {'primary_vdi_uuid': vdi_uuid}
+        return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]

     stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)

@@ -134,16 +134,29 @@ class FakeSessionForVMTests(fake.SessionBase):
         super(FakeSessionForVMTests, self).__init__(uri)

     def host_call_plugin(self, _1, _2, plugin, method, _5):
+        sr_ref = fake.get_all('SR')[0]
+        vdi_ref = fake.create_vdi('', False, sr_ref, False)
+        vdi_rec = fake.get_record('VDI', vdi_ref)
+        if plugin == "glance" and method == "download_vhd":
+            ret_str = json.dumps([dict(vdi_type='os',
+                                       vdi_uuid=vdi_rec['uuid'])])
+        else:
+            ret_str = vdi_rec['uuid']
+        return '%s' % ret_str
+
+    def host_call_plugin_swap(self, _1, _2, plugin, method, _5):
         sr_ref = fake.get_all('SR')[0]
         vdi_ref = fake.create_vdi('', False, sr_ref, False)
         vdi_rec = fake.get_record('VDI', vdi_ref)
         if plugin == "glance" and method == "download_vhd":
             swap_vdi_ref = fake.create_vdi('', False, sr_ref, False)
             swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref)
-            return '%s' % json.dumps(
-                    {'primary_vdi_uuid': vdi_rec['uuid'],
-                     'swap_vdi_uuid': swap_vdi_rec['uuid']})
-        return '%s' % vdi_rec['uuid']
+            ret_str = json.dumps(
+                    [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']),
+                     dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])])
+        else:
+            ret_str = vdi_rec['uuid']
+        return '%s' % ret_str

     def VM_start(self, _1, ref, _2, _3):
         vm = fake.get_record('VM', ref)

diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index e36ef3288..76988b172 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -159,7 +159,10 @@ def after_VBD_create(vbd_ref, vbd_rec):
     vbd_rec['device'] = ''
     vm_ref = vbd_rec['VM']
     vm_rec = _db_content['VM'][vm_ref]
-    vm_rec['VBDs'] = [vbd_ref]
+    if vm_rec.get('VBDs', None):
+        vm_rec['VBDs'].append(vbd_ref)
+    else:
+        vm_rec['VBDs'] = [vbd_ref]

     vm_name_label = _db_content['VM'][vm_ref]['name_label']
     vbd_rec['vm_name_label'] = vm_name_label

diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 3d980013a..bee9742a4 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -377,6 +377,9 @@ class VMHelper(HelperBase):
             xenapi_image_service = ['glance', 'objectstore']
             glance_address = 'address for glance services'
             glance_port = 'port for glance services'
+
+        Returns: A single filename if image_type is KERNEL_RAMDISK
+                 A list of dictionaries that describe VDIs, otherwise
         """
         access = AuthManager().get_access_key(user, project)
@@ -391,6 +394,10 @@ class VMHelper(HelperBase):
     @classmethod
     def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
                                 image_type):
+        """Tell glance to download an image and put the VHDs into the SR
+
+        Returns: A list of dictionaries that describe VDIs
+        """
         LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
                     % locals())
@@ -410,25 +417,21 @@ class VMHelper(HelperBase):
         kwargs = {'params': pickle.dumps(params)}
         task = session.async_call_plugin('glance', 'download_vhd', kwargs)
         result = session.wait_for_task(task, instance_id)
-        vdi_uuids = json.loads(result)
-        primary_vdi_uuid = vdi_uuids.get('primary_vdi_uuid')
-        swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None)
+        vdis = json.loads(result)
+        for vdi in vdis:
+            LOG.debug(_("xapi 'download_vhd' returned VDI of "
+                        "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'" % vdi))

         cls.scan_sr(session, instance_id, sr_ref)

+        # Pull out the UUID of the first VDI
+        vdi_uuid = vdis[0]['vdi_uuid']
         # Set the name-label to ease debugging
-        primary_vdi_ref = session.get_xenapi().VDI.get_by_uuid(primary_vdi_uuid)
+        vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
         primary_name_label = get_name_label_for_image(image)
-        session.get_xenapi().VDI.set_name_label(primary_vdi_ref, primary_name_label)
-
-        LOG.debug(_("xapi 'download_vhd' returned VDI UUID "
-                    "%(primary_vdi_uuid)s") % locals())
-        if swap_vdi_uuid:
-            LOG.debug(_("xapi 'download_vhd' returned SWAP VDI UUID "
-                        "%(swap_vdi_uuid)s") % locals())
+        session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label)

-        LOG.debug("=" * 100)
-        return vdi_uuids
+        return vdis

     @classmethod
     def _fetch_image_glance_disk(cls, session, instance_id, image, access,
@@ -440,6 +443,8 @@ class VMHelper(HelperBase):
         plugin; instead, it streams the disks through domU to the VDI
         directly.

+        Returns: A single filename if image_type is KERNEL_RAMDISK
+                 A list of dictionaries that describe VDIs, otherwise
         """
         # FIXME(sirp): Since the Glance plugin seems to be required for the
         # VHD disk, it may be worth using the plugin for both VHD and RAW and
@@ -486,7 +491,7 @@ class VMHelper(HelperBase):
             return filename
         else:
             vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref)
-            return {'primary_vdi_uuid': vdi_uuid}
+            return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]

     @classmethod
     def determine_disk_image_type(cls, instance):
@@ -545,6 +550,11 @@ class VMHelper(HelperBase):
     @classmethod
     def _fetch_image_glance(cls, session, instance_id, image, access,
                             image_type):
+        """Fetch image from glance based on image type.
+
+        Returns: A single filename if image_type is KERNEL_RAMDISK
+                 A list of dictionaries that describe VDIs, otherwise
+        """
         if image_type == ImageType.DISK_VHD:
             return cls._fetch_image_glance_vhd(
                 session, instance_id, image, access, image_type)
@@ -555,6 +565,11 @@ class VMHelper(HelperBase):
     @classmethod
     def _fetch_image_objectstore(cls, session, instance_id, image, access,
                                  secret, image_type):
+        """Fetch an image from objectstore.
+
+        Returns: A single filename if image_type is KERNEL_RAMDISK
+                 A list of dictionaries that describe VDIs, otherwise
+        """
         url = images.image_url(image)
         LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
         if image_type == ImageType.KERNEL_RAMDISK:
@@ -572,10 +587,10 @@ class VMHelper(HelperBase):
         if image_type == ImageType.DISK_RAW:
             args['raw'] = 'true'
         task = session.async_call_plugin('objectstore', fn, args)
-        uuid = session.wait_for_task(task, instance_id)
+        uuid_or_fn = session.wait_for_task(task, instance_id)
         if image_type != ImageType.KERNEL_RAMDISK:
-            return {'primary_vdi_uuid': uuid}
-        return uuid
+            return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)]
+        return uuid_or_fn

     @classmethod
     def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,

diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2a8d97a9d..02e140dcc 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -91,7 +91,8 @@ class VMOps(object):
     def finish_resize(self, instance, disk_info):
         vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
                 disk_info['cow'])
-        vm_ref = self._create_vm(instance, {'primary_vdi_uuid': vdi_uuid})
+        vm_ref = self._create_vm(instance,
+                                 [dict(vdi_type='os', vdi_uuid=vdi_uuid)])
        self.resize_instance(instance, vdi_uuid)
         self._spawn(instance, vm_ref)

@@ -105,25 +106,25 @@ class VMOps(object):
         LOG.debug(_("Starting instance %s"), instance.name)
         self._session.call_xenapi('VM.start', vm_ref, False, False)

-    def _create_disk(self, instance):
+    def _create_disks(self, instance):
         user = AuthManager().get_user(instance.user_id)
         project = AuthManager().get_project(instance.project_id)
         disk_image_type = VMHelper.determine_disk_image_type(instance)
-        vdi_uuids = VMHelper.fetch_image(self._session,
+        vdis = VMHelper.fetch_image(self._session,
                 instance.id, instance.image_id, user, project,
                 disk_image_type)
-        return vdi_uuids
+        return vdis

     def spawn(self, instance, network_info=None):
-        vdi_uuids = self._create_disk(instance)
-        vm_ref = self._create_vm(instance, vdi_uuids, network_info)
+        vdis = self._create_disks(instance)
+        vm_ref = self._create_vm(instance, vdis, network_info)
         self._spawn(instance, vm_ref)

     def spawn_rescue(self, instance):
         """Spawn a rescue instance."""
         self.spawn(instance)

-    def _create_vm(self, instance, vdi_uuids, network_info=None):
+    def _create_vm(self, instance, vdis, network_info=None):
         """Create VM instance."""
         instance_name = instance.name
         vm_ref = VMHelper.lookup(self._session, instance_name)
@@ -142,15 +143,6 @@ class VMOps(object):
         user = AuthManager().get_user(instance.user_id)
         project = AuthManager().get_project(instance.project_id)

-        # Are we building from a pre-existing disk?
-        primary_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
-                vdi_uuids.get('primary_vdi_uuid'))
-        swap_vdi_uuid = vdi_uuids.get('swap_vdi_uuid', None)
-        if swap_vdi_uuid:
-            swap_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
-                    swap_vdi_uuid)
-        else:
-            swap_vdi_ref = None
-
         disk_image_type = VMHelper.determine_disk_image_type(instance)

         kernel = None
@@ -165,17 +157,29 @@ class VMOps(object):
                     instance.ramdisk_id, user, project,
                     ImageType.KERNEL_RAMDISK)

+        # Create the VM ref and attach the first disk
+        first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+                vdis[0]['vdi_uuid'])
         use_pv_kernel = VMHelper.determine_is_pv(self._session,
-                instance.id, primary_vdi_ref, disk_image_type,
+                instance.id, first_vdi_ref, disk_image_type,
                 instance.os_type)
-        vm_ref = VMHelper.create_vm(self._session, instance, kernel,
-                                    ramdisk, use_pv_kernel)
-
+        vm_ref = VMHelper.create_vm(self._session, instance,
+                                    kernel, ramdisk, use_pv_kernel)
         VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
-                vdi_ref=primary_vdi_ref, userdevice=0, bootable=True)
-        if swap_vdi_ref:
+                vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
+
+        # Attach any other disks
+        # userdevice 1 is reserved for rescue
+        userdevice = 2
+        for vdi in vdis[1:]:
+            # vdi['vdi_type'] is either 'os' or 'swap', but we don't
+            # really care what it is right here.
+            vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+                    vdi['vdi_uuid'])
             VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
-                    vdi_ref=swap_vdi_ref, userdevice=2, bootable=False)
+                    vdi_ref=vdi_ref, userdevice=userdevice,
+                    bootable=False)
+            userdevice += 1

         # TODO(tr3buchet) - check to make sure we have network info, otherwise
         # create it now. This goes away once nova-multi-nic hits.
@@ -185,7 +189,7 @@ class VMOps(object):
         # Alter the image before VM start for, e.g. network injection
         if FLAGS.xenapi_inject_image:
             VMHelper.preconfigure_instance(self._session, instance,
-                                           primary_vdi_ref, network_info)
+                                           first_vdi_ref, network_info)

         self.create_vifs(vm_ref, network_info)
         self.inject_network_info(instance, network_info, vm_ref)

diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 0d02adfe9..039d1b8f6 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -91,8 +91,8 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host,
     conn.close()

-def _fixup_vhds(sr_path, staging_path, uuid_stack):
-    """Fixup the downloaded VHDs before we move them into the SR.
+def _import_vhds(sr_path, staging_path, uuid_stack):
+    """Import the VHDs found in the staging path.

     We cannot extract VHDs directly into the SR since they don't yet have
     UUIDs, aren't properly associated with each other, and would be subject to
@@ -102,16 +102,25 @@ def _import_vhds(sr_path, staging_path, uuid_stack):
     To avoid these we problems, we use a staging area to fixup the VHDs before
     moving them into the SR. The steps involved are:

-        1. Extracting tarball into staging area
+        1. Extracting tarball into staging area (done prior to this call)

         2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd')

-        3. Linking the two VHDs together
+        3. Linking VHDs together if there's a snap.vhd

         4. Pseudo-atomically moving the images into the SR. (It's not really
-           atomic because it takes place as two os.rename operations; however,
-           the chances of an SR.scan occuring between the two rename()
+           atomic because it takes place as multiple os.rename operations;
+           however, the chances of an SR.scan occuring between the rename()s
            invocations is so small that we can safely ignore it)
+
+    Returns: A list of VDIs.  Each list element is a dictionary containing
+        information about the VHD.  Dictionary keys are:
+        1. "vdi_type" - The type of VDI.  Currently they can be "os_disk" or
+           "swap"
+        2. "vdi_uuid" - The UUID of the VDI
+
+    Example return: [{"vdi_type": "os_disk","vdi_uuid": "ffff-aaa..vhd"},
+                     {"vdi_type": "swap","vdi_uuid": "ffff-bbb..vhd"}]
     """
     def rename_with_uuid(orig_path):
         """Rename VHD using UUID so that it will be recognized by SR on a
@@ -162,39 +171,57 @@ def _import_vhds(sr_path, staging_path, uuid_stack):
                 "VHD %(path)s is marked as hidden without child" %
                 locals())

-    orig_base_copy_path = os.path.join(staging_path, 'image.vhd')
-    if not os.path.exists(orig_base_copy_path):
+    def prepare_if_exists(staging_path, vhd_name, parent_path=None):
+        """
+        Check for existance of a particular VHD in the staging path and
+        preparing it for moving into the SR.
+
+        Returns: Tuple of (Path to move into the SR, VDI_UUID)
+                 None, if the vhd_name doesn't exist in the staging path
+
+        If the VHD exists, we will do the following:
+        1. Rename it with a UUID.
+        2. If parent_path exists, we'll link up the VHDs.
+        """
+        orig_path = os.path.join(staging_path, vhd_name)
+        if not os.path.exists(orig_path):
+            return None
+        new_path, vdi_uuid = rename_with_uuid(orig_path)
+        if parent_path:
+            # NOTE(sirp): this step is necessary so that an SR scan won't
+            # delete the base_copy out from under us (since it would be
+            # orphaned)
+            link_vhds(new_path, parent_path)
+        return (new_path, vdi_uuid)
+
+    vdi_return_list = []
+    paths_to_move = []
+
+    image_info = prepare_if_exists(staging_path, 'image.vhd')
+    if not image_info:
         raise Exception("Invalid image: image.vhd not present")

-    base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path)
-
-    vdi_uuid = base_copy_uuid
-    orig_snap_path = os.path.join(staging_path, 'snap.vhd')
-    if os.path.exists(orig_snap_path):
-        snap_path, snap_uuid = rename_with_uuid(orig_snap_path)
-        vdi_uuid = snap_uuid
-        # NOTE(sirp): this step is necessary so that an SR scan won't
-        # delete the base_copy out from under us (since it would be
-        # orphaned)
-        link_vhds(snap_path, base_copy_path)
-        move_into_sr(snap_path)
+    paths_to_move.append(image_info[0])
+
+    snap_info = prepare_if_exists(staging_path, 'snap.vhd', image_path)
+    if snap_info:
+        paths_to_move.append(snap_info[0])
+        # We return this snap as the VDI instead of image.vhd
+        vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1]))
     else:
-        assert_vhd_not_hidden(base_copy_path)
+        # If there's no snap, we return the image.vhd UUID
+        vdi_return_list.append(dict(vdi_type="os", vdi_uuid=image_info[1]))
+
+    swap_info = prepare_if_exists(staging_path, 'swap.vhd')
+    if swap_info:
+        paths_to_move.append(swap_info[0])
+        vdi_return_list.append(dict(vdi_type="swap", vdi_uuid=swap_info[1]))

-    # If we find a swap.vhd, go ahead and copy it into the SR
-    swap_vdi_uuid = None
-    orig_swap_path = os.path.join(staging_path, 'swap.vhd')
-    if os.path.exists(orig_swap_path):
-        swap_path, swap_vdi_uuid = rename_with_uuid(orig_swap_path)
-        move_into_sr(swap_path)
+    for path in paths_to_move:
+        move_into_sr(path)

-    vdi_uuids = {}
-    vdi_uuids['primary_vdi_uuid'] = vdi_uuid
-    if swap_vdi_uuid:
-        vdi_uuids['swap_vdi_uuid'] = swap_vdi_uuid
+    return vdi_return_list

-    move_into_sr(base_copy_path)
-    return vdi_uuids

 def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
     """Hard-link VHDs into staging area with appropriate filename
@@ -339,7 +366,9 @@ def download_vhd(session, args):
     try:
         _download_tarball(sr_path, staging_path, image_id, glance_host,
                           glance_port)
-        return json.dumps(_fixup_vhds(sr_path, staging_path, uuid_stack))
+        # Right now, it's easier to return a single string via XenAPI,
+        # so we'll json encode the list of VHDs.
+        return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack))
     finally:
         _cleanup_staging_area(staging_path)
-- cgit
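The new plugin/driver contract is a JSON-encoded list of dictionaries riding
over XenAPI's string-only plugin channel, with the convention that the first
entry is the root disk. Round-tripping it (illustrative UUID values):

    import json

    # What the glance plugin's download_vhd returns:
    result = json.dumps([dict(vdi_type='os', vdi_uuid='ffff-aaaa'),
                         dict(vdi_type='swap', vdi_uuid='ffff-bbbb')])

    # What vm_utils/vmops do with it:
    vdis = json.loads(result)
    root = vdis[0]['vdi_uuid']       # attached as userdevice 0, bootable
    extra = vdis[1:]                 # attached from userdevice 2 upwards
    assert root == 'ffff-aaaa'
    assert [v['vdi_type'] for v in extra] == ['swap']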
From 04785db717492c8ba7c2d184924b3773ec944f4c Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Thu, 26 May 2011 19:37:51 +0000
Subject: fix image_path in glance plugin

---
 plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 039d1b8f6..fef31a9ff 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -203,7 +203,8 @@ def _import_vhds(sr_path, staging_path, uuid_stack):

     paths_to_move.append(image_info[0])

-    snap_info = prepare_if_exists(staging_path, 'snap.vhd', image_path)
+    snap_info = prepare_if_exists(staging_path, 'snap.vhd',
+            image_info[0])
     if snap_info:
         paths_to_move.append(snap_info[0])
         # We return this snap as the VDI instead of image.vhd
-- cgit

From 9e22f51c80cc5f7f5ea60b5b8bb779779a19667c Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Thu, 26 May 2011 20:01:09 +0000
Subject: put back the hidden assert check i accidentally removed from glance
 plugin

---
 plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index fef31a9ff..0c00d168b 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -210,11 +210,13 @@ def _import_vhds(sr_path, staging_path, uuid_stack):
         # We return this snap as the VDI instead of image.vhd
         vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1]))
     else:
+        assert_vhd_not_hidden(image_info[0])
         # If there's no snap, we return the image.vhd UUID
         vdi_return_list.append(dict(vdi_type="os", vdi_uuid=image_info[1]))

     swap_info = prepare_if_exists(staging_path, 'swap.vhd')
     if swap_info:
+        assert_vhd_not_hidden(swap_info[0])
         paths_to_move.append(swap_info[0])
         vdi_return_list.append(dict(vdi_type="swap", vdi_uuid=swap_info[1]))
-- cgit
From d7e0b45a9bc415e87beee32f10c8d6bdff9819ed Mon Sep 17 00:00:00 2001
From: termie
Date: Thu, 26 May 2011 15:08:53 -0700
Subject: changes per review

---
 nova/rpc.py                | 17 ++++++++++-------
 nova/service.py            | 17 ++++++++---------
 nova/tests/test_rpc.py     | 12 +++---------
 nova/tests/test_service.py |  6 ++++--
 4 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/nova/rpc.py b/nova/rpc.py
index 493978e57..1ec495bc8 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -28,6 +28,7 @@ import json
 import sys
 import time
 import traceback
+import types
 import uuid
 
 from carrot import connection as carrot_connection
@@ -228,7 +229,7 @@ class AdapterConsumer(Consumer):
             rval = node_func(context=ctxt, **node_args)
             if msg_id:
                 # Check if the result was a generator
-                if hasattr(rval, 'send'):
+                if isinstance(rval, types.GeneratorType):
                     for x in rval:
                         msg_reply(msg_id, x, None)
                 else:
@@ -236,7 +237,7 @@
                 # This final None tells multicall that it is done.
                 msg_reply(msg_id, None, None)
-            elif hasattr(rval, 'send'):
+            elif isinstance(rval, types.GeneratorType):
                 # NOTE(vish): this iterates through the generator
                 list(rval)
         except Exception as e:
@@ -281,11 +282,11 @@ class FanoutAdapterConsumer(AdapterConsumer):
 class ConsumerSet(object):
     """Groups consumers to listen on together on a single connection."""
 
-    def __init__(self, conn, consumer_list):
+    def __init__(self, connection, consumer_list):
         self.consumer_list = set(consumer_list)
         self.consumer_set = None
         self.enabled = True
-        self.init(conn)
+        self.init(connection)
 
     def init(self, conn):
         if not conn:
@@ -316,8 +317,7 @@
                     running = False
                     break
                 except Exception as e:
-                    LOG.error(_("Received exception %s " % type(e) + \
-                                "while processing consumer"))
+                    LOG.exception(_("Exception while processing consumer"))
                     self.reconnect()
                     # Break to outer loop
                     break
@@ -534,7 +534,10 @@ def call(context, topic, msg):
     """Sends a message on a topic and wait for a response."""
     rv = multicall(context, topic, msg)
     # NOTE(vish): return the last result from the multicall
-    return list(rv)[-1]
+    rv = list(rv)
+    if not rv:
+        return
+    return rv[-1]
 
 
 def cast(context, topic, msg):
diff --git a/nova/service.py b/nova/service.py
index 782183322..74f9f04d8 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -105,19 +105,18 @@ class Service(object):
                 connection=self.conn,
                 topic=self.topic,
                 proxy=self)
-
-        cset = rpc.ConsumerSet(self.conn, [consumer_all,
-                                           consumer_node,
-                                           fanout])
+        consumer_set = rpc.ConsumerSet(
+                connection=self.conn,
+                consumer_list=[consumer_all, consumer_node, fanout])
 
         # Wait forever, processing these consumers
         def _wait():
             try:
-                cset.wait()
+                consumer_set.wait()
             finally:
-                cset.close()
+                consumer_set.close()
 
-        self.csetthread = greenthread.spawn(_wait)
+        self.consumer_set_thread = greenthread.spawn(_wait)
 
         if self.report_interval:
             pulse = utils.LoopingCall(self.report_state)
@@ -182,9 +181,9 @@ class Service(object):
             logging.warn(_('Service killed that has no database entry'))
 
     def stop(self):
-        self.csetthread.kill()
+        self.consumer_set_thread.kill()
         try:
-            self.csetthread.wait()
+            self.consumer_set_thread.wait()
         except greenlet.GreenletExit:
             pass
         for x in self.timers:
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 35f4a64d9..ffd748efe 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -66,12 +66,10 @@ class RpcTestCase(test.TestCase):
                           'test',
                           {"method": "echo",
                            "args": {"value": value}})
-        i = 0
-        for x in result:
+        for i, x in enumerate(result):
             if i > 0:
                 self.fail('should only receive one response')
             self.assertEqual(value + i, x)
-            i += 1
 
     def test_multicall_succeed_three_times(self):
         value = 42
@@ -79,10 +77,8 @@
                           'test',
                           {"method": "echo_three_times",
                            "args": {"value": value}})
-        i = 0
-        for x in result:
+        for i, x in enumerate(result):
             self.assertEqual(value + i, x)
-            i += 1
 
     def test_multicall_succeed_three_times_yield(self):
         value = 42
@@ -90,10 +86,8 @@
                           'test',
                           {"method": "echo_three_times_yield",
                            "args": {"value": value}})
-        i = 0
-        for x in result:
+        for i, x in enumerate(result):
             self.assertEqual(value + i, x)
-            i += 1
 
     def test_context_passed(self):
         """Makes sure a context is passed through rpc call."""
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 0bba01d92..d1cc8bd61 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -142,7 +142,8 @@ class ServiceTestCase(test.TestCase):
         mock_cset = self.mox.CreateMock(rpc.ConsumerSet,
                                         {'wait': wait_func})
-        rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset)
+        rpc.ConsumerSet(connection=mox.IgnoreArg(),
+                        consumer_list=mox.IsA(list)).AndReturn(mock_cset)
         wait_func(mox.IgnoreArg())
 
         service_create = {'host': host,
@@ -331,7 +332,8 @@
         mock_cset = self.mox.CreateMock(rpc.ConsumerSet,
                                         {'wait': wait_func})
-        rpc.ConsumerSet(mox.IgnoreArg(), mox.IsA(list)).AndReturn(mock_cset)
+        rpc.ConsumerSet(connection=mox.IgnoreArg(),
+                        consumer_list=mox.IsA(list)).AndReturn(mock_cset)
         wait_func(mox.IgnoreArg())
 
         self.mox.StubOutWithMock(serv.manager.driver,
-- cgit
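
The review change from hasattr(rval, 'send') to isinstance(rval,
types.GeneratorType) is worth spelling out: any object with a send()
method (a socket, say) would pass the hasattr test and be wrongly
treated as a generator.  A standalone illustration of the difference
(not from the patch):

    import types

    def gen():
        yield 1

    class FakeSocket(object):
        def send(self, data):
            pass

    print(hasattr(gen(), 'send'))                         # True
    print(hasattr(FakeSocket(), 'send'))                  # True (false positive)
    print(isinstance(gen(), types.GeneratorType))         # True
    print(isinstance(FakeSocket(), types.GeneratorType))  # False
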
From 103bcae9f172dfee64e7b9235807bcfe1a8aefb3 Mon Sep 17 00:00:00 2001
From: termie
Date: Thu, 26 May 2011 17:06:52 -0700
Subject: fix a minor bug unrelated to this change

---
 nova/rpc.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/nova/rpc.py b/nova/rpc.py
index 1ec495bc8..c5277c6a9 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -212,7 +212,9 @@ class AdapterConsumer(Consumer):
             # we just log the message and send an error string
             # back to the caller
             LOG.warn(_('no method for message: %s') % message_data)
-            msg_reply(msg_id, _('No method for message: %s') % message_data)
+            if msg_id:
+                msg_reply(msg_id,
+                          _('No method for message: %s') % message_data)
             return
         self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
-- cgit
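
Relatedly, the call() change in the "changes per review" patch guards
against an empty multicall result: the old list(rv)[-1] raises IndexError
on an empty list, while the new code returns None.  A minimal
demonstration of the behavior (standalone, not nova code):

    def last_or_none(results):
        """Mirror the new call() tail: last element, or None if empty."""
        results = list(results)
        if not results:
            return None
        return results[-1]

    assert last_or_none(iter([1, 2, 3])) == 3
    assert last_or_none(iter([])) is None
    # The old 'return list(rv)[-1]' would raise IndexError on the
    # empty case.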