summary refs log tree commit diff stats
path: root/nova
diff options
context:
space:
mode:
authorJohannes Erdfelt <johannes.erdfelt@rackspace.com>2012-02-08 23:05:53 +0000
committerJohannes Erdfelt <johannes.erdfelt@rackspace.com>2012-02-08 23:09:53 +0000
commit1815aaf13af1c5d6d4225fa0f8f4adb2b10548e6 (patch)
treee30ce79aaaa06c5a717a3450a976332c0802d503 /nova
parente456d2b4e258880f7a34044aacf13eb02cbfd532 (diff)
downloadnova-1815aaf13af1c5d6d4225fa0f8f4adb2b10548e6.tar.gz
nova-1815aaf13af1c5d6d4225fa0f8f4adb2b10548e6.tar.xz
nova-1815aaf13af1c5d6d4225fa0f8f4adb2b10548e6.zip
Remove unnecessary use of LoopingCall in nova/virt/xenapi/vm_utils.py
The code synchronously blocks waiting on the LoopingCall to finish anyway, and the use of a maximum number of attempts ends up being awkwardly fit in. Also, remove an out-of-date comment about future use of LoopingCall.

Change-Id: Ife397e171f28fff5e73c70e6957cecdd09a42d68
Diffstat (limited to 'nova')
-rw-r--r--nova/virt/xenapi/vm_utils.py28
1 file changed, 10 insertions, 18 deletions
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 91cbe2e38..9eb0bed32 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -33,6 +33,8 @@ import uuid
from decimal import Decimal, InvalidOperation
from xml.dom import minidom
+from eventlet import greenthread
+
from nova import exception
from nova import flags
from nova.image import glance
@@ -1211,16 +1213,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
snapshot
"""
max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
- attempts = {'counter': 0}
-
- def _poll_vhds():
- attempts['counter'] += 1
- if attempts['counter'] > max_attempts:
- counter = attempts['counter']
- msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
- " %(max_attempts)d), giving up...") % locals())
- raise exception.Error(msg)
-
+ for i in xrange(max_attempts):
VMHelper.scan_sr(session, instance, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
@@ -1228,13 +1221,13 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
" %(original_parent_uuid)s, waiting for coalesce...")
% locals())
else:
- # Breakout of the loop (normally) and return the parent_uuid
- raise utils.LoopingCallDone(parent_uuid)
+ return parent_uuid
+
+ greenthread.sleep(FLAGS.xenapi_vhd_coalesce_poll_interval)
- loop = utils.LoopingCall(_poll_vhds)
- loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
- parent_uuid = loop.wait()
- return parent_uuid
+ msg = (_("VHD coalesce attempts exceeded (%(max_attempts)d)"
+ ", giving up...") % locals())
+ raise exception.Error(msg)
def remap_vbd_dev(dev):
@@ -1319,7 +1312,6 @@ def vbd_unplug_with_retry(session, vbd_ref):
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
should be dead."""
- # FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
while True:
try:
session.call_xenapi("VBD.unplug", vbd_ref)
@@ -1329,7 +1321,7 @@ def vbd_unplug_with_retry(session, vbd_ref):
if (len(e.details) > 0 and
e.details[0] == 'DEVICE_DETACH_REJECTED'):
LOG.debug(_('VBD.unplug rejected: retrying...'))
- time.sleep(1)
+ greenthread.sleep(1)
LOG.debug(_('Not sleeping anymore!'))
elif (len(e.details) > 0 and
e.details[0] == 'DEVICE_ALREADY_DETACHED'):