From 2337fab0979b72bbc7e7730e94518a0e835a2751 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Mon, 25 Oct 2010 03:45:19 +0900 Subject: part way through porting the codebase off of twisted this provides a very basic eventlet-based service replacement for the twistd-based services, a replacement for task.LoopingCall also adds nova-combined with the goal of running a single service when doing local testing and dev --- nova/virt/fake.py | 6 +- nova/virt/libvirt_conn.py | 172 ++++++++++++++++++++++------------------------ nova/virt/xenapi.py | 103 ++++++++++++--------------- 3 files changed, 127 insertions(+), 154 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index eaa2261f5..0684a0877 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -24,8 +24,6 @@ This module also documents the semantics of real hypervisor connections. import logging -from twisted.internet import defer - from nova.compute import power_state @@ -105,7 +103,6 @@ class FakeConnection(object): fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING - return defer.succeed(None) def reboot(self, instance): """ @@ -117,7 +114,7 @@ class FakeConnection(object): The work will be done asynchronously. This function returns a Deferred that allows the caller to detect when it is complete. """ - return defer.succeed(None) + pass def destroy(self, instance): """ @@ -130,7 +127,6 @@ class FakeConnection(object): Deferred that allows the caller to detect when it is complete. """ del self.instances[instance.name] - return defer.succeed(None) def attach_volume(self, instance_name, device_path, mountpoint): """Attach the disk at device_path to the instance at mountpoint""" diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 509ed97a0..9ca97bd1b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -25,10 +25,10 @@ import logging import os import shutil +from eventlet import event +from eventlet import tpool + import IPy -from twisted.internet import defer -from twisted.internet import task -from twisted.internet import threads from nova import context from nova import db @@ -145,13 +145,12 @@ class LibvirtConnection(object): except Exception as _err: pass # If the instance is already terminated, we're still happy - d = defer.Deferred() - d.addCallback(lambda _: self._cleanup(instance)) - # FIXME: What does this comment mean? 
- # TODO(termie): short-circuit me for tests - # WE'LL save this for when we do shutdown, + + done = event.Event() + + # We'll save this for when we do shutdown, # instead of destroy - but destroy returns immediately - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_shutdown(): try: @@ -160,17 +159,26 @@ class LibvirtConnection(object): instance['id'], state) if state == power_state.SHUTDOWN: timer.stop() - d.callback(None) except Exception: db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() - d.callback(None) timer.f = _wait_for_shutdown - timer.start(interval=0.5, now=True) - return d + timer_done = timer.start(interval=0.5, now=True) + + # NOTE(termie): this is strictly superfluous (we could put the + # cleanup code in the timer), but this emulates the + # previous model so I am keeping it around until + # everything has been vetted a bit + def _wait_for_timer(): + timer_done.wait() + self._cleanup(instance) + done.send() + + greenthread.spawn(_wait_for_time) + return done def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) @@ -179,32 +187,28 @@ class LibvirtConnection(object): if os.path.exists(target): shutil.rmtree(target) - @defer.inlineCallbacks @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): - yield process.simple_execute("sudo virsh attach-disk %s %s %s" % - (instance_name, - device_path, - mountpoint.rpartition('/dev/')[2])) + process.simple_execute("sudo virsh attach-disk %s %s %s" % + (instance_name, + device_path, + mountpoint.rpartition('/dev/')[2])) - @defer.inlineCallbacks @exception.wrap_exception def detach_volume(self, instance_name, mountpoint): # NOTE(vish): despite the documentation, virsh detach-disk just # wants the device name without the leading /dev/ - yield process.simple_execute("sudo virsh detach-disk %s %s" % - (instance_name, - mountpoint.rpartition('/dev/')[2])) + process.simple_execute("sudo virsh detach-disk %s %s" % + (instance_name, + mountpoint.rpartition('/dev/')[2])) - @defer.inlineCallbacks @exception.wrap_exception def reboot(self, instance): xml = self.to_xml(instance) - yield self._conn.lookupByName(instance['name']).destroy() - yield self._conn.createXML(xml, 0) + self._conn.lookupByName(instance['name']).destroy() + self._conn.createXML(xml, 0) - d = defer.Deferred() - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_reboot(): try: @@ -214,20 +218,16 @@ class LibvirtConnection(object): if state == power_state.RUNNING: logging.debug('instance %s: rebooted', instance['name']) timer.stop() - d.callback(None) except Exception, exn: logging.error('_wait_for_reboot failed: %s', exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() - d.callback(None) timer.f = _wait_for_reboot - timer.start(interval=0.5, now=True) - yield d + return timer.start(interval=0.5, now=True) - @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): xml = self.to_xml(instance) @@ -235,16 +235,12 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - yield NWFilterFirewall(self._conn).\ - setup_nwfilters_for_instance(instance) - yield self._create_image(instance, xml) - yield self._conn.createXML(xml, 0) - # TODO(termie): this should actually register - # a callback to check for successful boot + NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) + 
self._create_image(instance, xml) + self._conn.createXML(xml, 0) logging.debug("instance %s: is running", instance['name']) - local_d = defer.Deferred() - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_boot(): try: @@ -254,7 +250,6 @@ class LibvirtConnection(object): if state == power_state.RUNNING: logging.debug('instance %s: booted', instance['name']) timer.stop() - local_d.callback(None) except: logging.exception('instance %s: failed to boot', instance['name']) @@ -262,10 +257,9 @@ class LibvirtConnection(object): instance['id'], power_state.SHUTDOWN) timer.stop() - local_d.callback(None) + timer.f = _wait_for_boot - timer.start(interval=0.5, now=True) - yield local_d + return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): logging.info('virsh said: %r' % (virsh_output,)) @@ -273,10 +267,9 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - d = process.simple_execute("sudo dd if=%s iflag=nonblock" % + r = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) - d.addCallback(lambda r: r[0]) - return d + return r[0] else: return '' @@ -296,21 +289,21 @@ class LibvirtConnection(object): def get_console_output(self, instance): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - d = process.simple_execute('sudo chown %d %s' % (os.getuid(), - console_log)) + + process.simple_execute('sudo chown %d %s' % (os.getuid(), + console_log)) + if FLAGS.libvirt_type == 'xen': - # Xen is spethial - d.addCallback(lambda _: - process.simple_execute("virsh ttyconsole %s" % - instance['name'])) - d.addCallback(self._flush_xen_console) - d.addCallback(self._append_to_file, console_log) + # Xen is special + virsh_output = process.simple_execute("virsh ttyconsole %s" % + instance['name']) + data = self._flush_xen_console(virsh_output) + fpath = self._append_to_file(data, console_log) else: - d.addCallback(lambda _: defer.succeed(console_log)) - d.addCallback(self._dump_file) - return d + fpath = console_log + + return self._dump_file(fpath) - @defer.inlineCallbacks def _create_image(self, inst, libvirt_xml): # syntactic nicety basepath = lambda fname='': os.path.join(FLAGS.instances_path, @@ -318,8 +311,8 @@ class LibvirtConnection(object): fname) # ensure directories exist and are writable - yield process.simple_execute('mkdir -p %s' % basepath()) - yield process.simple_execute('chmod 0777 %s' % basepath()) + process.simple_execute('mkdir -p %s' % basepath()) + process.simple_execute('chmod 0777 %s' % basepath()) # TODO(termie): these are blocking calls, it would be great # if they weren't. 
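The pattern above repeats through the rest of this port: Twisted's task.LoopingCall is swapped for a utils.LoopingCall that runs the polling function in a greenthread, and instead of firing a Deferred, start() hands back an eventlet event.Event the caller can wait() on, which is what "return timer.start(interval=0.5, now=True)" relies on. The helper itself is not part of this diff; a minimal sketch of what a greenthread-based LoopingCall might look like (names and details assumed, not taken from this patch):

    import sys

    from eventlet import event
    from eventlet import greenthread


    class LoopingCall(object):
        """Call self.f every `interval` seconds until stop() is called."""

        def __init__(self, f=None, *args, **kw):
            # f may be assigned after construction, as the hunks above
            # do with "timer.f = _wait_for_shutdown"
            self.f = f
            self.args = args
            self.kw = kw
            self._running = False

        def start(self, interval, now=True):
            self._running = True
            done = event.Event()

            def _inner():
                if not now:
                    greenthread.sleep(interval)
                try:
                    while self._running:
                        self.f(*self.args, **self.kw)
                        greenthread.sleep(interval)
                except Exception:
                    # deliver the failure to whoever is waiting
                    done.send_exception(*sys.exc_info())
                    return
                done.send(True)

            self.done = done
            greenthread.spawn(_inner)
            # returning the event lets callers block until the loop stops,
            # the eventlet stand-in for the old Deferred callback
            return self.done

        def stop(self):
            self._running = False

        def wait(self):
            return self.done.wait()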
@@ -335,19 +328,19 @@ class LibvirtConnection(object): project = manager.AuthManager().get_project(inst['project_id']) if not os.path.exists(basepath('disk')): - yield images.fetch(inst.image_id, basepath('disk-raw'), user, - project) + images.fetch(inst.image_id, basepath('disk-raw'), user, + project) if not os.path.exists(basepath('kernel')): - yield images.fetch(inst.kernel_id, basepath('kernel'), user, - project) + images.fetch(inst.kernel_id, basepath('kernel'), user, + project) if not os.path.exists(basepath('ramdisk')): - yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, - project) - - execute = lambda cmd, process_input=None, check_exit_code=True: \ - process.simple_execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, + project) + + def execute(cmd, process_input=None, check_exit_code=True): + return process.simple_execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None @@ -369,23 +362,23 @@ class LibvirtConnection(object): if net: logging.info('instance %s: injecting net into image %s', inst['name'], inst.image_id) - yield disk.inject_data(basepath('disk-raw'), key, net, - execute=execute) + disk.inject_data(basepath('disk-raw'), key, net, + execute=execute) if os.path.exists(basepath('disk')): - yield process.simple_execute('rm -f %s' % basepath('disk')) + process.simple_execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] * 1024 * 1024 * 1024) resize = inst['instance_type'] != 'm1.tiny' - yield disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) if FLAGS.libvirt_type == 'uml': - yield process.simple_execute('sudo chown root %s' % - basepath('disk')) + process.simple_execute('sudo chown root %s' % + basepath('disk')) def to_xml(self, instance): # TODO(termie): cache? @@ -637,15 +630,15 @@ class NWFilterFirewall(object): def _define_filter(self, xml): if callable(xml): xml = xml() - d = threads.deferToThread(self._conn.nwfilterDefineXML, xml) - return d + + # execute in a native thread and block until done + tpool.execute(self._conn.nwfilterDefineXML, xml) @staticmethod def _get_net_and_mask(cidr): net = IPy.IP(cidr) return str(net.net()), str(net.netmask()) - @defer.inlineCallbacks def setup_nwfilters_for_instance(self, instance): """ Creates an NWFilter for the given instance. In the process, @@ -653,10 +646,10 @@ class NWFilterFirewall(object): the base filter are all in place. 
""" - yield self._define_filter(self.nova_base_ipv4_filter) - yield self._define_filter(self.nova_base_ipv6_filter) - yield self._define_filter(self.nova_dhcp_filter) - yield self._define_filter(self.nova_base_filter) + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_base_filter) nwfilter_xml = "\n" \ " \n" % \ @@ -668,20 +661,19 @@ class NWFilterFirewall(object): net, mask = self._get_net_and_mask(network_ref['cidr']) project_filter = self.nova_project_filter(instance['project_id'], net, mask) - yield self._define_filter(project_filter) + self._define_filter(project_filter) nwfilter_xml += " \n" % \ instance['project_id'] for security_group in instance.security_groups: - yield self.ensure_security_group_filter(security_group['id']) + self.ensure_security_group_filter(security_group['id']) nwfilter_xml += " \n" % \ security_group['id'] nwfilter_xml += "" - yield self._define_filter(nwfilter_xml) - return + self._define_filter(nwfilter_xml) def ensure_security_group_filter(self, security_group_id): return self._define_filter( diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index a17e405ab..f997d01d7 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -36,11 +36,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. """ import logging +import sys import xmlrpclib -from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task +from eventlet import tpool from nova import db from nova import flags @@ -110,36 +109,33 @@ class XenAPIConnection(object): return [self._conn.xenapi.VM.get_name_label(vm) \ for vm in self._conn.xenapi.VM.get_all()] - @defer.inlineCallbacks def spawn(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) network = db.project_get_network(None, instance.project_id) - network_ref = \ - yield self._find_network_with_bridge(network.bridge) + network_ref = self._find_network_with_bridge(network.bridge) user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield self._fetch_image( + vdi_uuid = self._fetch_image( instance.image_id, user, project, True) - kernel = yield self._fetch_image( + kernel = self._fetch_image( instance.kernel_id, user, project, False) - ramdisk = yield self._fetch_image( + ramdisk = self._fetch_image( instance.ramdisk_id, user, project, False) - vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) + vdi_ref = self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) + vm_ref = self._create_vm(instance, kernel, ramdisk) + self._create_vbd(vm_ref, vdi_ref, 0, True) if network_ref: - yield self._create_vif(vm_ref, network_ref, instance.mac_address) + self._create_vif(vm_ref, network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) - yield self._call_xenapi('VM.start', vm_ref, False, False) + self._call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def _create_vm(self, instance, kernel, ramdisk): """Create a VM record. 
Returns a Deferred that gives the new VM reference.""" @@ -177,11 +173,10 @@ class XenAPIConnection(object): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = yield self._call_xenapi('VM.create', rec) + vm_ref = self._call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) + return vm_ref - @defer.inlineCallbacks def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new VBD reference.""" @@ -200,12 +195,11 @@ class XenAPIConnection(object): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) + vbd_ref = self._call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - defer.returnValue(vbd_ref) + return vbd_ref - @defer.inlineCallbacks def _create_vif(self, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -221,24 +215,22 @@ class XenAPIConnection(object): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, network_ref) - vif_ref = yield self._call_xenapi('VIF.create', vif_rec) + vif_ref = self._call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - defer.returnValue(vif_ref) + return vif_ref - @defer.inlineCallbacks def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge - networks = yield self._call_xenapi('network.get_all_records_where', - expr) + networks = self._call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - defer.returnValue(networks.keys()[0]) + return networks.keys()[0] elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: raise Exception('Found no network for bridge %s' % bridge) - @defer.inlineCallbacks def _fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -255,33 +247,31 @@ class XenAPIConnection(object): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - task = yield self._async_call_plugin('objectstore', fn, args) - uuid = yield self._wait_for_task(task) - defer.returnValue(uuid) + task = self._async_call_plugin('objectstore', fn, args) + uuid = self._wait_for_task(task) + return uuid - @defer.inlineCallbacks def reboot(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: raise Exception('instance not present %s' % instance.name) - task = yield self._call_xenapi('Async.VM.clean_reboot', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.clean_reboot', vm) + self._wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. 
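The conversion applied to each of these methods is mechanical: drop @defer.inlineCallbacks, turn every "yield expr" into a plain call, and replace defer.returnValue(x) with "return x" (a bare defer.returnValue(None), as in the hunk just below, becomes a bare return). Condensed from the reboot() change for illustration, with the lookup check elided:

    # Twisted: a generator the reactor drives via callbacks
    @defer.inlineCallbacks
    def reboot(self, instance):
        vm = yield self._lookup(instance.name)
        task = yield self._call_xenapi('Async.VM.clean_reboot', vm)
        yield self._wait_for_task(task)

    # eventlet: an ordinary method; each call blocks only this greenthread
    def reboot(self, instance):
        vm = self._lookup(instance.name)
        task = self._call_xenapi('Async.VM.clean_reboot', vm)
        self._wait_for_task(task)

Note that the docstrings still promising Deferreds are left stale at this point in the series.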
- defer.returnValue(None) + return try: - task = yield self._call_xenapi('Async.VM.hard_shutdown', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.hard_shutdown', vm) + self._wait_for_task(task) except Exception, exc: logging.warn(exc) try: - task = yield self._call_xenapi('Async.VM.destroy', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.destroy', vm) + self._wait_for_task(task) except Exception, exc: logging.warn(exc) @@ -299,7 +289,6 @@ class XenAPIConnection(object): def get_console_output(self, instance): return 'FAKE CONSOLE OUTPUT' - @utils.deferredToThread def _lookup(self, i): return self._lookup_blocking(i) @@ -316,35 +305,32 @@ class XenAPIConnection(object): def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" - d = defer.Deferred() - reactor.callLater(0, self._poll_task, task, d) - return d - @utils.deferredToThread - def _poll_task(self, task, deferred): + done = event.Event() + loop = utis.LoopingTask(self._poll_task, task, done) + loop.start(FLAGS.xenapi_task_poll_interval, now=True) + return done.wait() + + def _poll_task(self, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) status = self._conn.xenapi.task.get_status(task) if status == 'pending': - reactor.callLater(FLAGS.xenapi_task_poll_interval, - self._poll_task, task, deferred) + return elif status == 'success': result = self._conn.xenapi.task.get_result(task) logging.info('Task %s status: success. %s', task, result) - deferred.callback(_parse_xmlrpc_value(result)) + done.send(_parse_xmlrpc_value(result)) else: error_info = self._conn.xenapi.task.get_error_info(task) logging.warn('Task %s status: %s. %s', task, status, error_info) - deferred.errback(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + done.send_exception(XenAPI.Failure(error_info)) except Exception, exc: logging.warn(exc) - deferred.errback(exc) + done.send_exception(*sys.exc_info()) - @utils.deferredToThread def _call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns a Deferred for the result.""" @@ -353,11 +339,10 @@ class XenAPIConnection(object): f = f.__getattr__(m) return f(*args) - @utils.deferredToThread def _async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. 
Returns a Deferred with the task reference.""" - return _unwrap_plugin_exceptions( + return tpool.execute(_unwrap_plugin_exceptions, self._conn.xenapi.Async.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) -- cgit From 3c85f1b7ed593a2d4d126a34241f217da5cf7ce6 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 8 Dec 2010 17:18:27 -0800 Subject: intermediate commit to checkpoint progress all relevant tests are passing except volume, next step is volume manager fixery --- nova/virt/images.py | 9 +++++---- nova/virt/libvirt_conn.py | 28 +++++++++++++--------------- nova/virt/xenapi/vm_utils.py | 28 +++++++++++----------------- nova/virt/xenapi_conn.py | 1 - 4 files changed, 29 insertions(+), 37 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/images.py b/nova/virt/images.py index 981aa5cf3..4d7c65f12 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -26,7 +26,7 @@ import time import urlparse from nova import flags -from nova import process +from nova import utils from nova.auth import manager from nova.auth import signer from nova.objectstore import image @@ -63,15 +63,16 @@ def _fetch_s3_image(image, path, user, project): cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k, v) in headers.iteritems(): - cmd += ['-H', '%s: %s' % (k, v)] + cmd += ['-H', '"%s: %s"' % (k, v)] cmd += ['-o', path] - return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + cmd_out = ' '.join(cmd) + return utils.execute(cmd_out) def _fetch_local_image(image, path, user, project): source = _image_path('%s/image' % image) - return process.simple_execute('cp %s %s' % (source, path)) + return utils.execute('cp %s %s' % (source, path)) def _image_path(path): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c09a7c01d..715e7234c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -54,7 +54,6 @@ from nova import context from nova import db from nova import exception from nova import flags -from nova import process from nova import utils #from nova.api import context from nova.auth import manager @@ -366,8 +365,8 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - r = process.simple_execute("sudo dd if=%s iflag=nonblock" % - virsh_output, check_exit_code=False) + r = utils.execute("sudo dd if=%s iflag=nonblock" % + virsh_output, check_exit_code=False) return r[0] else: return '' @@ -389,13 +388,13 @@ class LibvirtConnection(object): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - process.simple_execute('sudo chown %d %s' % (os.getuid(), - console_log)) + utils.execute('sudo chown %d %s' % (os.getuid(), + console_log)) if FLAGS.libvirt_type == 'xen': # Xen is special - virsh_output = process.simple_execute("virsh ttyconsole %s" % - instance['name']) + virsh_output = utils.execute("virsh ttyconsole %s" % + instance['name']) data = self._flush_xen_console(virsh_output) fpath = self._append_to_file(data, console_log) else: @@ -411,8 +410,8 @@ class LibvirtConnection(object): prefix + fname) # ensure directories exist and are writable - process.simple_execute('mkdir -p %s' % basepath(prefix='')) - process.simple_execute('chmod 0777 %s' % basepath(prefix='')) + utils.execute('mkdir -p %s' % basepath(prefix='')) + utils.execute('chmod 0777 %s' % basepath(prefix='')) # TODO(termie): these are blocking calls, it would be great # if they weren't. 
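This commit also retires the Twisted process pool: nova.process.simple_execute gives way to a synchronous utils.execute. The call sites above and below depend on two properties: it returns an (stdout, stderr) tuple (a later commit in this series accordingly corrects "return r[0]" to "out, err = utils.execute(...)"), and it raises when check_exit_code is set and the command exits nonzero. The helper is outside this diff; a rough sketch under those assumptions, with the real error type and logging elided:

    import subprocess


    def execute(cmd, process_input=None, check_exit_code=True):
        # NOTE: without eventlet monkey-patching, communicate() blocks the
        # whole hub rather than just this greenthread, an assumed trade-off
        obj = subprocess.Popen(cmd, shell=True,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        result = obj.communicate(process_input)
        if obj.returncode and check_exit_code:
            stdout, stderr = result
            raise RuntimeError('%r failed: %s' % (cmd, stderr))
        return result  # (stdout, stderr)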
@@ -443,9 +442,9 @@ class LibvirtConnection(object): project) def execute(cmd, process_input=None, check_exit_code=True): - return process.simple_execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None @@ -471,7 +470,7 @@ class LibvirtConnection(object): execute=execute) if os.path.exists(basepath('disk')): - process.simple_execute('rm -f %s' % basepath('disk')) + utils.execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] @@ -485,8 +484,7 @@ class LibvirtConnection(object): local_bytes, resize, execute=execute) if FLAGS.libvirt_type == 'uml': - process.simple_execute('sudo chown root %s' % - basepath('disk')) + utils.execute('sudo chown root %s' % basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 99d484ca2..b72b8e13d 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,14 +21,13 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import logging -from twisted.internet import defer - from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -36,6 +35,7 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} + XenAPI = None @@ -49,7 +49,6 @@ class VMHelper(): XenAPI = __import__('XenAPI') @classmethod - @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new VM reference.""" @@ -87,12 +86,11 @@ class VMHelper(): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = yield session.call_xenapi('VM.create', rec) + vm_ref = session.call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) + return vm_ref @classmethod - @defer.inlineCallbacks def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new VBD reference.""" @@ -111,13 +109,12 @@ class VMHelper(): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec) + vbd_ref = session.call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - defer.returnValue(vbd_ref) + return vbd_ref @classmethod - @defer.inlineCallbacks def create_vif(cls, session, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -133,13 +130,12 @@ class VMHelper(): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... 
', vm_ref, network_ref) - vif_ref = yield session.call_xenapi('VIF.create', vif_rec) + vif_ref = session.call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - defer.returnValue(vif_ref) + return vif_ref @classmethod - @defer.inlineCallbacks def fetch_image(cls, session, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -156,12 +152,11 @@ class VMHelper(): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - task = yield session.async_call_plugin('objectstore', fn, args) - uuid = yield session.wait_for_task(task) - defer.returnValue(uuid) + task = session.async_call_plugin('objectstore', fn, args) + uuid = session.wait_for_task(task) + return uuid @classmethod - @utils.deferredToThread def lookup(cls, session, i): """ Look the instance i up, and returns it if available """ return VMHelper.lookup_blocking(session, i) @@ -179,7 +174,6 @@ class VMHelper(): return vms[0] @classmethod - @utils.deferredToThread def lookup_vm_vdis(cls, session, vm): """ Look for the VDIs that are attached to the VM """ return VMHelper.lookup_vm_vdis_blocking(session, vm) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index dacf9fe2b..96d211cc0 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -167,7 +167,6 @@ class XenAPISession(object): self.get_xenapi_host(), plugin, fn, args) def wait_for_task(self, task): ->>>>>>> MERGE-SOURCE """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" -- cgit From af5c175dbc77048fb74311bf92569866676eee9c Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 16:18:52 -0800 Subject: removed a few more references to twisted --- nova/virt/images.py | 2 +- nova/virt/xenapi/network_utils.py | 9 ++---- nova/virt/xenapi/vmops.py | 61 ++++++++++++++++++--------------------- 3 files changed, 32 insertions(+), 40 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/images.py b/nova/virt/images.py index 4d7c65f12..1c9b2e093 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -50,7 +50,7 @@ def _fetch_s3_image(image, path, user, project): # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use - # twisted web client. + # a web client. headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..d8632f393 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -20,8 +20,6 @@ records and their attributes like bridges, PIFs, QoS, as well as their lookup functions. 
""" -from twisted.internet import defer - class NetworkHelper(): """ @@ -31,14 +29,13 @@ class NetworkHelper(): return @classmethod - @defer.inlineCallbacks def find_network_with_bridge(cls, session, bridge): """ Return the network on which the bridge is attached, if found """ expr = 'field "bridge" = "%s"' % bridge - networks = yield session.call_xenapi('network.get_all_records_where', - expr) + networks = session.call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - defer.returnValue(networks.keys()[0]) + return networks.keys()[0] elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d36cdaea5..0223e512a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -20,8 +20,6 @@ Management class for VM-related functions (spawn, reboot, etc). import logging -from twisted.internet import defer - from nova import db from nova import context from nova.auth.manager import AuthManager @@ -46,10 +44,9 @@ class VMOps(object): return [self._session.get_xenapi().VM.get_name_label(vm) \ for vm in self._session.get_xenapi().VM.get_all()] - @defer.inlineCallbacks def spawn(self, instance): """ Create VM instance """ - vm = yield VMHelper.lookup(self._session, instance.name) + vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) @@ -57,66 +54,64 @@ class VMOps(object): bridge = db.project_get_network(context.get_admin_context(), instance.project_id).bridge network_ref = \ - yield NetworkHelper.find_network_with_bridge(self._session, bridge) + NetworkHelper.find_network_with_bridge(self._session, bridge) user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield VMHelper.fetch_image(self._session, - instance.image_id, user, project, True) - kernel = yield VMHelper.fetch_image(self._session, - instance.kernel_id, user, project, False) - ramdisk = yield VMHelper.fetch_image(self._session, - instance.ramdisk_id, user, project, False) - vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield VMHelper.create_vm(self._session, - instance, kernel, ramdisk) - yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) + vdi_uuid = VMHelper.fetch_image( + self._session, instance.image_id, user, project, True) + kernel = VMHelper.fetch_image( + self._session, instance.kernel_id, user, project, False) + ramdisk = VMHelper.fetch_image( + self._session, instance.ramdisk_id, user, project, False) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + vm_ref = VMHelper.create_vm( + self._session, instance, kernel, ramdisk) + VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) if network_ref: - yield VMHelper.create_vif(self._session, vm_ref, - network_ref, instance.mac_address) + VMHelper.create_vif(self._session, vm_ref, + network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) - yield self._session.call_xenapi('VM.start', vm_ref, False, False) + self._session.call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def reboot(self, instance): """ Reboot VM instance """ instance_name = instance.name - vm = yield VMHelper.lookup(self._session, instance_name) + vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance 
not present %s' % instance_name) - task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VM.clean_reboot', vm) + self._session.wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): """ Destroy VM instance """ - vm = yield VMHelper.lookup(self._session, instance.name) + vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. - defer.returnValue(None) + return # Get the VDIs related to the VM - vdis = yield VMHelper.lookup_vm_vdis(self._session, vm) + vdis = VMHelper.lookup_vm_vdis(self._session, vm) try: - task = yield self._session.call_xenapi('Async.VM.hard_shutdown', + task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - yield self._session.wait_for_task(task) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up if vdis: for vdi in vdis: try: - task = yield self._session.call_xenapi('Async.VDI.destroy', - vdi) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VDI.destroy', + vdi) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) try: - task = yield self._session.call_xenapi('Async.VM.destroy', vm) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VM.destroy', vm) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) -- cgit From c835c441981a17764931390bc1ace6121ab100a4 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Fri, 10 Dec 2010 11:53:17 -0800 Subject: port new patches --- nova/virt/xenapi/vmops.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 13871b479..b6b92b926 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -126,14 +126,13 @@ class VMOps(object): rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) - @defer.inlineCallbacks def get_diagnostics(self, instance_id): """Return data about VM diagnostics""" - vm = yield VMHelper.lookup(self._session, instance_id) + vm = VMHelper.lookup(self._session, instance_id) if vm is None: raise Exception("instance not present %s" % instance_id) - rec = yield self._session.get_xenapi().VM.get_record(vm) - defer.returnValue(VMHelper.compile_diagnostics(self._session, rec)) + rec = self._session.get_xenapi().VM.get_record(vm) + return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): """ Return snapshot of console """ -- cgit From a2a8406b5d793545c8ecb359e18b80bba618c509 Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 14 Dec 2010 16:05:39 -0800 Subject: updates per review --- nova/virt/fake.py | 4 ++-- nova/virt/libvirt_conn.py | 14 ++++++-------- nova/virt/xenapi/network_utils.py | 5 ++--- nova/virt/xenapi/vmops.py | 3 +-- nova/virt/xenapi_conn.py | 6 ++++-- 5 files changed, 15 insertions(+), 17 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 91dc8173b..77bc926c2 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -122,13 +122,13 @@ class FakeConnection(object): """ Rescue the specified instance. """ - return + pass def unrescue(self, instance): """ Unrescue the specified instance. 
""" - return + pass def destroy(self, instance): """ diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 715e7234c..ba51f8f69 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -215,7 +215,7 @@ class LibvirtConnection(object): self._cleanup(instance) done.send() - greenthread.spawn(_wait_for_time) + greenthread.spawn(_wait_for_timer) return done def _cleanup(self, instance): @@ -365,9 +365,9 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - r = utils.execute("sudo dd if=%s iflag=nonblock" % - virsh_output, check_exit_code=False) - return r[0] + out, err = utils.execute("sudo dd if=%s iflag=nonblock" % + virsh_output, check_exit_code=False) + return out else: return '' @@ -388,8 +388,7 @@ class LibvirtConnection(object): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - utils.execute('sudo chown %d %s' % (os.getuid(), - console_log)) + utils.execute('sudo chown %d %s' % (os.getuid(), console_log)) if FLAGS.libvirt_type == 'xen': # Xen is special @@ -476,7 +475,6 @@ class LibvirtConnection(object): ['local_gb'] * 1024 * 1024 * 1024) - resize = inst['instance_type'] != 'm1.tiny' resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False @@ -743,7 +741,7 @@ class NWFilterFirewall(object): if callable(xml): xml = xml() - # execute in a native thread and block until done + # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) @staticmethod diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index d8632f393..012954394 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -30,10 +30,9 @@ class NetworkHelper(): @classmethod def find_network_with_bridge(cls, session, bridge): - """ Return the network on which the bridge is attached, if found """ + """ Return the network on which the bridge is attached, if found.""" expr = 'field "bridge" = "%s"' % bridge - networks = session.call_xenapi('network.get_all_records_where', - expr) + networks = session.call_xenapi('network.get_all_records_where', expr) if len(networks) == 1: return networks.keys()[0] elif len(networks) > 1: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b6b92b926..3034df9e1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -107,8 +107,7 @@ class VMOps(object): if vdis: for vdi in vdis: try: - task = self._session.call_xenapi('Async.VDI.destroy', - vdi) + task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index df8e42d34..424311133 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -175,9 +175,11 @@ class XenAPISession(object): The task is polled until it completes.""" done = event.Event() - loop = utis.LoopingTask(self._poll_task, task, done) + loop = utils.LoopingTask(self._poll_task, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) - return done.wait() + rv = done.wait() + loop.stop() + return rv def _poll_task(self, task, done): """Poll the given XenAPI task, and fire the given Deferred if we -- cgit From fdf067037981c2b4b4501258919af0f9e1d0ec26 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 15 Dec 2010 10:38:30 -0800 Subject: add missing import --- nova/virt/xenapi_conn.py | 2 ++ 1 file changed, 2 
insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 424311133..a88101ad0 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -51,6 +51,8 @@ import logging import sys import xmlrpclib +from eventlet import event + from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps -- cgit From 9a8113584edc9a8dbf42e7039b373429c11a7760 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 15 Dec 2010 11:53:18 -0800 Subject: fixes for xenapi (thanks sandywalsh) --- nova/virt/xenapi_conn.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a88101ad0..09d399da4 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -52,6 +52,7 @@ import sys import xmlrpclib from eventlet import event +from eventlet import tpool from nova import utils from nova import flags @@ -164,20 +165,20 @@ class XenAPISession(object): f = self._session.xenapi for m in method.split('.'): f = f.__getattr__(m) - return f(*args) + return tpool.execute(f, *args) def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread.""" - return _unwrap_plugin_exceptions( - self._session.xenapi.Async.host.call_plugin, - self.get_xenapi_host(), plugin, fn, args) + return tpool.execute(_unwrap_plugin_exceptions, + self._session.xenapi.Async.host.call_plugin, + self.get_xenapi_host(), plugin, fn, args) def wait_for_task(self, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingTask(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() -- cgit From 9b049acc27d477a1ab9e13c9e064e59d8bd0a3ae Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 16 Dec 2010 10:52:30 -0800 Subject: pep8 fixes --- nova/virt/libvirt_conn.py | 12 ++++++------ nova/virt/xenapi_conn.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ba51f8f69..5a8c71850 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -205,7 +205,7 @@ class LibvirtConnection(object): timer.f = _wait_for_shutdown timer_done = timer.start(interval=0.5, now=True) - + # NOTE(termie): this is strictly superfluous (we could put the # cleanup code in the timer), but this emulates the # previous model so I am keeping it around until @@ -387,7 +387,7 @@ class LibvirtConnection(object): def get_console_output(self, instance): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - + utils.execute('sudo chown %d %s' % (os.getuid(), console_log)) if FLAGS.libvirt_type == 'xen': @@ -439,11 +439,11 @@ class LibvirtConnection(object): if not os.path.exists(basepath('ramdisk')): images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project) - + def execute(cmd, process_input=None, check_exit_code=True): - return utils.execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 09d399da4..6beb08f5e 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ 
-200,7 +200,7 @@ class XenAPISession(object): error_info = self._session.xenapi.task.get_error_info(task) logging.warn('Task %s status: %s. %s', task, status, error_info) - done.send_exception(XenAPI.Failure(error_info)) + done.send_exception(XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) except XenAPI.Failure, exc: logging.warn(exc) -- cgit From 611935aa3e3a66e9638b0c127041a6fca4788b9c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:03:37 -0600 Subject: Put flags back in vm_utils --- nova/virt/xenapi/vm_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index dde138404..b83ae9475 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,15 +21,20 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import logging import urllib + from xml.dom import minidom +from nova import flags from nova import utils + from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +FLAGS = flags.FLAGS + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -37,7 +42,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} - XenAPI = None -- cgit From e5a3d993cb13c8dc5e984a67521f77ce8fdf8e4c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:19:35 -0600 Subject: Removed unnecessary blank lines --- nova/virt/xenapi/vm_utils.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b83ae9475..2f5d78e75 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,12 +21,10 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import logging import urllib - from xml.dom import minidom from nova import flags from nova import utils - from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state -- cgit
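Taken together, the series settles on a small set of eventlet idioms in place of the Twisted ones:

    defer.Deferred                 -> eventlet.event.Event
    @defer.inlineCallbacks / yield -> plain methods and calls
    defer.returnValue(x)           -> return x
    task.LoopingCall               -> utils.LoopingCall (greenthread-based)
    threads.deferToThread          -> tpool.execute (native thread pool)
    reactor.callLater re-polling   -> LoopingCall + Event

The end state of task waiting in xenapi_conn.py, assembled from the hunks above, shows how the pieces compose: tpool.execute keeps the blocking XML-RPC calls off the hub in a native thread, LoopingCall polls the task from a greenthread, and the Event carries the result back (send_exception makes done.wait() re-raise the failure in the waiting greenthread):

    def wait_for_task(self, task):
        """Block this greenthread until the given XenAPI task completes."""
        done = event.Event()
        loop = utils.LoopingCall(self._poll_task, task, done)
        loop.start(FLAGS.xenapi_task_poll_interval, now=True)
        rv = done.wait()   # returns what _poll_task send()s, or raises
        loop.stop()
        return rv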