From c599914304b262067c19b2968ab50826b4d9bcd3 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 30 Jul 2010 12:54:03 -0700 Subject: Removed duplicate toXml method --- nova/virt/libvirt_conn.py | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 43b5eded2..6bc5ebf89 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -228,20 +228,6 @@ class LibvirtConnection(object): return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - def toXml(self, instance): - # TODO(termie): cache? - logging.debug("Starting the toXML method") - libvirt_xml = open(FLAGS.libvirt_xml_template).read() - xml_info = instance.datamodel.copy() - # TODO(joshua): Make this xml express the attached disks as well - - # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(instance.datamodel.copy()) - libvirt_xml = libvirt_xml % xml_info - logging.debug("Finished the toXML method") - - return libvirt_xml - def toXml(self): # TODO(termie): cache? logging.debug("Starting the toXML method") -- cgit From 7985f5fb32432630b3fef775f601900f89346ed2 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 30 Jul 2010 13:35:21 -0700 Subject: Accept a configurable libvirt_uri --- nova/virt/libvirt_conn.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 6bc5ebf89..8d473296a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -49,6 +49,9 @@ FLAGS = flags.FLAGS flags.DEFINE_string('libvirt_xml_template', utils.abspath('compute/libvirt.xml.template'), 'Libvirt XML Template') +flags.DEFINE_string('libvirt_uri', + 'qemu:///system', + 'Libvirt URI to connect to') def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -67,10 +70,11 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] + libvirt_uri = str(FLAGS.libvirt_uri) if read_only: - self._conn = libvirt.openReadOnly('qemu:///system') + self._conn = libvirt.openReadOnly(libvirt_uri) else: - self._conn = libvirt.openAuth('qemu:///system', auth, 0) + self._conn = libvirt.openAuth(libvirt_uri, auth, 0) def list_instances(self): -- cgit From 6c32e87c1be80230cf058586cee5a94cd25670b8 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 31 Jul 2010 21:35:58 -0700 Subject: Fixed up some of the raw disk stuff that broke in the abstraction out of libvirt --- nova/virt/libvirt_conn.py | 48 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 13 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 8d473296a..d18158678 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -195,32 +195,54 @@ class LibvirtConnection(object): user = manager.AuthManager().get_user(data['user_id']) if not os.path.exists(basepath('disk')): yield images.fetch(data['image_id'], basepath('disk-raw'), user) - if not os.path.exists(basepath('kernel')): - yield images.fetch(data['kernel_id'], basepath('kernel'), user) - if not os.path.exists(basepath('ramdisk')): - yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user) + + using_kernel = data['kernel_id'] and True + + if using_kernel: + if not os.path.exists(basepath('kernel')): + yield images.fetch(data['kernel_id'], 
basepath('kernel'), user) + if not os.path.exists(basepath('ramdisk')): + yield images.fetch(data['ramdisk_id'], basepath('ramdisk'), user) execute = lambda cmd, input=None: \ process.simple_execute(cmd=cmd, input=input, error_ok=1) + # For now, we assume that if we're not using a kernel, we're using a partitioned disk image + # where the target partition is the first partition + target_partition = None + if not using_kernel: + target_partition = "1" + key = data['key_data'] net = None if FLAGS.simple_network: - with open(FLAGS.simple_network_template) as f: - net = f.read() % {'address': data['private_dns_name'], + network_info = {'address': data['private_dns_name'], 'network': FLAGS.simple_network_network, 'netmask': FLAGS.simple_network_netmask, 'gateway': FLAGS.simple_network_gateway, 'broadcast': FLAGS.simple_network_broadcast, 'dns': FLAGS.simple_network_dns} - if key or net: + + with open(FLAGS.simple_network_template) as f: + net = f.read() % network_info + + with open(FLAGS.simple_network_dns_template) as f: + dns = str(Template(f.read(), searchList=[ network_info ] )) + + + if key or net or dns: logging.info('Injecting data into image %s', data['image_id']) - yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) + try: + yield disk.inject_data(basepath('disk-raw'), key=key, net=net, dns=dns, remove_network_udev=True, partition=target_partition, execute=execute) + except Exception as e: + # This could be a windows image, or a vmdk format disk + logging.warn('Could not inject data; ignoring. (%s)' % e) - if os.path.exists(basepath('disk')): - yield process.simple_execute('rm -f %s' % basepath('disk')) + if using_kernel: + if os.path.exists(basepath('disk')): + yield process.simple_execute('rm -f %s' % basepath('disk')) bytes = (instance_types.INSTANCE_TYPES[data['instance_type']]['local_gb'] * 1024 * 1024 * 1024) @@ -232,15 +254,15 @@ class LibvirtConnection(object): return os.path.abspath(os.path.join(instance.datamodel['basepath'], path)) - def toXml(self): + def toXml(self, instance): # TODO(termie): cache? 
logging.debug("Starting the toXML method") template_contents = open(FLAGS.libvirt_xml_template).read() - xml_info = self.datamodel.copy() + xml_info = instance.datamodel.copy() # TODO(joshua): Make this xml express the attached disks as well # TODO(termie): lazy lazy hack because xml is annoying - xml_info['nova'] = json.dumps(self.datamodel.copy()) + xml_info['nova'] = json.dumps(instance.datamodel.copy()) if xml_info['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" -- cgit From 9732c0af89f21490cc8d6bc80799bbc8b36fb441 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 14 Oct 2010 13:38:35 -0700 Subject: Minimized diff, fixed formatting --- nova/virt/libvirt.uml.xml.template | 2 +- nova/virt/libvirt.xen.xml.template | 2 +- nova/virt/libvirt_conn.py | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template index f6e5fad69..da9588049 100644 --- a/nova/virt/libvirt.uml.xml.template +++ b/nova/virt/libvirt.uml.xml.template @@ -12,7 +12,7 @@ - + diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template index 9508ad3b7..8f650e512 100644 --- a/nova/virt/libvirt.xen.xml.template +++ b/nova/virt/libvirt.xen.xml.template @@ -23,7 +23,7 @@ - + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ba5d6dbac..ece98087b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -72,6 +72,7 @@ flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') + def get_connection(read_only): # These are loaded late so that there's no need to install these # libraries when not using libvirt. @@ -126,6 +127,7 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] + if read_only: return libvirt.openReadOnly(uri) else: -- cgit From f8028c0a4cd1c3cfb8a9c6b4c397fd67ce912cce Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 14 Oct 2010 13:48:34 -0700 Subject: Removed stray spaces that were causing an unnecessary diff line --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ece98087b..68791c28a 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -127,7 +127,7 @@ class LibvirtConnection(object): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], 'root', None] - + if read_only: return libvirt.openReadOnly(uri) else: -- cgit From 98623a160078dfed7347dcd1539b0cd27e51644a Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 15 Oct 2010 11:06:16 -0700 Subject: Removed 'and True' oddity --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 68791c28a..d78a8f0db 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -324,7 +324,7 @@ class LibvirtConnection(object): if not os.path.exists(basepath('disk')): yield images.fetch(inst.image_id, basepath('disk-raw'), user, project) - using_kernel = inst.kernel_id and True + using_kernel = inst.kernel_id if using_kernel: if not os.path.exists(basepath('kernel')): yield images.fetch(inst.kernel_id, basepath('kernel'), user, project) -- cgit From 2337fab0979b72bbc7e7730e94518a0e835a2751 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Mon, 25 Oct 2010 03:45:19 
+0900 Subject: part way through porting the codebase off of twisted this provides a very basic eventlet-based service replacement for the twistd-based services, a replacement for task.LoopingCall also adds nova-combined with the goal of running a single service when doing local testing and dev --- nova/virt/fake.py | 6 +- nova/virt/libvirt_conn.py | 172 ++++++++++++++++++++++------------------------ nova/virt/xenapi.py | 103 ++++++++++++--------------- 3 files changed, 127 insertions(+), 154 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index eaa2261f5..0684a0877 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -24,8 +24,6 @@ This module also documents the semantics of real hypervisor connections. import logging -from twisted.internet import defer - from nova.compute import power_state @@ -105,7 +103,6 @@ class FakeConnection(object): fake_instance = FakeInstance() self.instances[instance.name] = fake_instance fake_instance._state = power_state.RUNNING - return defer.succeed(None) def reboot(self, instance): """ @@ -117,7 +114,7 @@ class FakeConnection(object): The work will be done asynchronously. This function returns a Deferred that allows the caller to detect when it is complete. """ - return defer.succeed(None) + pass def destroy(self, instance): """ @@ -130,7 +127,6 @@ class FakeConnection(object): Deferred that allows the caller to detect when it is complete. """ del self.instances[instance.name] - return defer.succeed(None) def attach_volume(self, instance_name, device_path, mountpoint): """Attach the disk at device_path to the instance at mountpoint""" diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 509ed97a0..9ca97bd1b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -25,10 +25,10 @@ import logging import os import shutil +from eventlet import event +from eventlet import tpool + import IPy -from twisted.internet import defer -from twisted.internet import task -from twisted.internet import threads from nova import context from nova import db @@ -145,13 +145,12 @@ class LibvirtConnection(object): except Exception as _err: pass # If the instance is already terminated, we're still happy - d = defer.Deferred() - d.addCallback(lambda _: self._cleanup(instance)) - # FIXME: What does this comment mean? 
- # TODO(termie): short-circuit me for tests - # WE'LL save this for when we do shutdown, + + done = event.Event() + + # We'll save this for when we do shutdown, # instead of destroy - but destroy returns immediately - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_shutdown(): try: @@ -160,17 +159,26 @@ class LibvirtConnection(object): instance['id'], state) if state == power_state.SHUTDOWN: timer.stop() - d.callback(None) except Exception: db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() - d.callback(None) timer.f = _wait_for_shutdown - timer.start(interval=0.5, now=True) - return d + timer_done = timer.start(interval=0.5, now=True) + + # NOTE(termie): this is strictly superfluous (we could put the + # cleanup code in the timer), but this emulates the + # previous model so I am keeping it around until + # everything has been vetted a bit + def _wait_for_timer(): + timer_done.wait() + self._cleanup(instance) + done.send() + + greenthread.spawn(_wait_for_time) + return done def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) @@ -179,32 +187,28 @@ class LibvirtConnection(object): if os.path.exists(target): shutil.rmtree(target) - @defer.inlineCallbacks @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): - yield process.simple_execute("sudo virsh attach-disk %s %s %s" % - (instance_name, - device_path, - mountpoint.rpartition('/dev/')[2])) + process.simple_execute("sudo virsh attach-disk %s %s %s" % + (instance_name, + device_path, + mountpoint.rpartition('/dev/')[2])) - @defer.inlineCallbacks @exception.wrap_exception def detach_volume(self, instance_name, mountpoint): # NOTE(vish): despite the documentation, virsh detach-disk just # wants the device name without the leading /dev/ - yield process.simple_execute("sudo virsh detach-disk %s %s" % - (instance_name, - mountpoint.rpartition('/dev/')[2])) + process.simple_execute("sudo virsh detach-disk %s %s" % + (instance_name, + mountpoint.rpartition('/dev/')[2])) - @defer.inlineCallbacks @exception.wrap_exception def reboot(self, instance): xml = self.to_xml(instance) - yield self._conn.lookupByName(instance['name']).destroy() - yield self._conn.createXML(xml, 0) + self._conn.lookupByName(instance['name']).destroy() + self._conn.createXML(xml, 0) - d = defer.Deferred() - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_reboot(): try: @@ -214,20 +218,16 @@ class LibvirtConnection(object): if state == power_state.RUNNING: logging.debug('instance %s: rebooted', instance['name']) timer.stop() - d.callback(None) except Exception, exn: logging.error('_wait_for_reboot failed: %s', exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() - d.callback(None) timer.f = _wait_for_reboot - timer.start(interval=0.5, now=True) - yield d + return timer.start(interval=0.5, now=True) - @defer.inlineCallbacks @exception.wrap_exception def spawn(self, instance): xml = self.to_xml(instance) @@ -235,16 +235,12 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - yield NWFilterFirewall(self._conn).\ - setup_nwfilters_for_instance(instance) - yield self._create_image(instance, xml) - yield self._conn.createXML(xml, 0) - # TODO(termie): this should actually register - # a callback to check for successful boot + NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) + 
self._create_image(instance, xml) + self._conn.createXML(xml, 0) logging.debug("instance %s: is running", instance['name']) - local_d = defer.Deferred() - timer = task.LoopingCall(f=None) + timer = utils.LoopingCall(f=None) def _wait_for_boot(): try: @@ -254,7 +250,6 @@ class LibvirtConnection(object): if state == power_state.RUNNING: logging.debug('instance %s: booted', instance['name']) timer.stop() - local_d.callback(None) except: logging.exception('instance %s: failed to boot', instance['name']) @@ -262,10 +257,9 @@ class LibvirtConnection(object): instance['id'], power_state.SHUTDOWN) timer.stop() - local_d.callback(None) + timer.f = _wait_for_boot - timer.start(interval=0.5, now=True) - yield local_d + return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): logging.info('virsh said: %r' % (virsh_output,)) @@ -273,10 +267,9 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - d = process.simple_execute("sudo dd if=%s iflag=nonblock" % + r = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) - d.addCallback(lambda r: r[0]) - return d + return r[0] else: return '' @@ -296,21 +289,21 @@ class LibvirtConnection(object): def get_console_output(self, instance): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - d = process.simple_execute('sudo chown %d %s' % (os.getuid(), - console_log)) + + process.simple_execute('sudo chown %d %s' % (os.getuid(), + console_log)) + if FLAGS.libvirt_type == 'xen': - # Xen is spethial - d.addCallback(lambda _: - process.simple_execute("virsh ttyconsole %s" % - instance['name'])) - d.addCallback(self._flush_xen_console) - d.addCallback(self._append_to_file, console_log) + # Xen is special + virsh_output = process.simple_execute("virsh ttyconsole %s" % + instance['name']) + data = self._flush_xen_console(virsh_output) + fpath = self._append_to_file(data, console_log) else: - d.addCallback(lambda _: defer.succeed(console_log)) - d.addCallback(self._dump_file) - return d + fpath = console_log + + return self._dump_file(fpath) - @defer.inlineCallbacks def _create_image(self, inst, libvirt_xml): # syntactic nicety basepath = lambda fname='': os.path.join(FLAGS.instances_path, @@ -318,8 +311,8 @@ class LibvirtConnection(object): fname) # ensure directories exist and are writable - yield process.simple_execute('mkdir -p %s' % basepath()) - yield process.simple_execute('chmod 0777 %s' % basepath()) + process.simple_execute('mkdir -p %s' % basepath()) + process.simple_execute('chmod 0777 %s' % basepath()) # TODO(termie): these are blocking calls, it would be great # if they weren't. 
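
The TODO above flags process.simple_execute and images.fetch as blocking calls. A minimal sketch, not part of this patch, of how such a call could be pushed onto a native thread with eventlet's tpool, the same mechanism this change already uses for nwfilterDefineXML; the helper name fetch_in_thread is an assumption:

from eventlet import tpool

from nova.virt import images


def fetch_in_thread(image_id, path, user, project):
    # tpool.execute runs the blocking fetch in a native OS thread and
    # hands the result back when it finishes, so the calling green
    # thread yields cooperatively instead of stalling the whole hub.
    return tpool.execute(images.fetch, image_id, path, user, project)
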
@@ -335,19 +328,19 @@ class LibvirtConnection(object): project = manager.AuthManager().get_project(inst['project_id']) if not os.path.exists(basepath('disk')): - yield images.fetch(inst.image_id, basepath('disk-raw'), user, - project) + images.fetch(inst.image_id, basepath('disk-raw'), user, + project) if not os.path.exists(basepath('kernel')): - yield images.fetch(inst.kernel_id, basepath('kernel'), user, - project) + images.fetch(inst.kernel_id, basepath('kernel'), user, + project) if not os.path.exists(basepath('ramdisk')): - yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, - project) - - execute = lambda cmd, process_input=None, check_exit_code=True: \ - process.simple_execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, + project) + + def execute(cmd, process_input=None, check_exit_code=True): + return process.simple_execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None @@ -369,23 +362,23 @@ class LibvirtConnection(object): if net: logging.info('instance %s: injecting net into image %s', inst['name'], inst.image_id) - yield disk.inject_data(basepath('disk-raw'), key, net, - execute=execute) + disk.inject_data(basepath('disk-raw'), key, net, + execute=execute) if os.path.exists(basepath('disk')): - yield process.simple_execute('rm -f %s' % basepath('disk')) + process.simple_execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] * 1024 * 1024 * 1024) resize = inst['instance_type'] != 'm1.tiny' - yield disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) if FLAGS.libvirt_type == 'uml': - yield process.simple_execute('sudo chown root %s' % - basepath('disk')) + process.simple_execute('sudo chown root %s' % + basepath('disk')) def to_xml(self, instance): # TODO(termie): cache? @@ -637,15 +630,15 @@ class NWFilterFirewall(object): def _define_filter(self, xml): if callable(xml): xml = xml() - d = threads.deferToThread(self._conn.nwfilterDefineXML, xml) - return d + + # execute in a native thread and block until done + tpool.execute(self._conn.nwfilterDefineXML, xml) @staticmethod def _get_net_and_mask(cidr): net = IPy.IP(cidr) return str(net.net()), str(net.netmask()) - @defer.inlineCallbacks def setup_nwfilters_for_instance(self, instance): """ Creates an NWFilter for the given instance. In the process, @@ -653,10 +646,10 @@ class NWFilterFirewall(object): the base filter are all in place. 
""" - yield self._define_filter(self.nova_base_ipv4_filter) - yield self._define_filter(self.nova_base_ipv6_filter) - yield self._define_filter(self.nova_dhcp_filter) - yield self._define_filter(self.nova_base_filter) + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_base_filter) nwfilter_xml = "\n" \ " \n" % \ @@ -668,20 +661,19 @@ class NWFilterFirewall(object): net, mask = self._get_net_and_mask(network_ref['cidr']) project_filter = self.nova_project_filter(instance['project_id'], net, mask) - yield self._define_filter(project_filter) + self._define_filter(project_filter) nwfilter_xml += " \n" % \ instance['project_id'] for security_group in instance.security_groups: - yield self.ensure_security_group_filter(security_group['id']) + self.ensure_security_group_filter(security_group['id']) nwfilter_xml += " \n" % \ security_group['id'] nwfilter_xml += "" - yield self._define_filter(nwfilter_xml) - return + self._define_filter(nwfilter_xml) def ensure_security_group_filter(self, security_group_id): return self._define_filter( diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index a17e405ab..f997d01d7 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -36,11 +36,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. """ import logging +import sys import xmlrpclib -from twisted.internet import defer -from twisted.internet import reactor -from twisted.internet import task +from eventlet import tpool from nova import db from nova import flags @@ -110,36 +109,33 @@ class XenAPIConnection(object): return [self._conn.xenapi.VM.get_name_label(vm) \ for vm in self._conn.xenapi.VM.get_all()] - @defer.inlineCallbacks def spawn(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) network = db.project_get_network(None, instance.project_id) - network_ref = \ - yield self._find_network_with_bridge(network.bridge) + network_ref = self._find_network_with_bridge(network.bridge) user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield self._fetch_image( + vdi_uuid = self._fetch_image( instance.image_id, user, project, True) - kernel = yield self._fetch_image( + kernel = self._fetch_image( instance.kernel_id, user, project, False) - ramdisk = yield self._fetch_image( + ramdisk = self._fetch_image( instance.ramdisk_id, user, project, False) - vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) + vdi_ref = self._call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) + vm_ref = self._create_vm(instance, kernel, ramdisk) + self._create_vbd(vm_ref, vdi_ref, 0, True) if network_ref: - yield self._create_vif(vm_ref, network_ref, instance.mac_address) + self._create_vif(vm_ref, network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) - yield self._call_xenapi('VM.start', vm_ref, False, False) + self._call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def _create_vm(self, instance, kernel, ramdisk): """Create a VM record. 
Returns a Deferred that gives the new VM reference.""" @@ -177,11 +173,10 @@ class XenAPIConnection(object): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = yield self._call_xenapi('VM.create', rec) + vm_ref = self._call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) + return vm_ref - @defer.inlineCallbacks def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new VBD reference.""" @@ -200,12 +195,11 @@ class XenAPIConnection(object): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec) + vbd_ref = self._call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - defer.returnValue(vbd_ref) + return vbd_ref - @defer.inlineCallbacks def _create_vif(self, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -221,24 +215,22 @@ class XenAPIConnection(object): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, network_ref) - vif_ref = yield self._call_xenapi('VIF.create', vif_rec) + vif_ref = self._call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - defer.returnValue(vif_ref) + return vif_ref - @defer.inlineCallbacks def _find_network_with_bridge(self, bridge): expr = 'field "bridge" = "%s"' % bridge - networks = yield self._call_xenapi('network.get_all_records_where', - expr) + networks = self._call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - defer.returnValue(networks.keys()[0]) + return networks.keys()[0] elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: raise Exception('Found no network for bridge %s' % bridge) - @defer.inlineCallbacks def _fetch_image(self, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -255,33 +247,31 @@ class XenAPIConnection(object): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - task = yield self._async_call_plugin('objectstore', fn, args) - uuid = yield self._wait_for_task(task) - defer.returnValue(uuid) + task = self._async_call_plugin('objectstore', fn, args) + uuid = self._wait_for_task(task) + return uuid - @defer.inlineCallbacks def reboot(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: raise Exception('instance not present %s' % instance.name) - task = yield self._call_xenapi('Async.VM.clean_reboot', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.clean_reboot', vm) + self._wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): - vm = yield self._lookup(instance.name) + vm = self._lookup(instance.name) if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. 
- defer.returnValue(None) + return try: - task = yield self._call_xenapi('Async.VM.hard_shutdown', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.hard_shutdown', vm) + self._wait_for_task(task) except Exception, exc: logging.warn(exc) try: - task = yield self._call_xenapi('Async.VM.destroy', vm) - yield self._wait_for_task(task) + task = self._call_xenapi('Async.VM.destroy', vm) + self._wait_for_task(task) except Exception, exc: logging.warn(exc) @@ -299,7 +289,6 @@ class XenAPIConnection(object): def get_console_output(self, instance): return 'FAKE CONSOLE OUTPUT' - @utils.deferredToThread def _lookup(self, i): return self._lookup_blocking(i) @@ -316,35 +305,32 @@ class XenAPIConnection(object): def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" - d = defer.Deferred() - reactor.callLater(0, self._poll_task, task, d) - return d - @utils.deferredToThread - def _poll_task(self, task, deferred): + done = event.Event() + loop = utis.LoopingTask(self._poll_task, task, done) + loop.start(FLAGS.xenapi_task_poll_interval, now=True) + return done.wait() + + def _poll_task(self, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) status = self._conn.xenapi.task.get_status(task) if status == 'pending': - reactor.callLater(FLAGS.xenapi_task_poll_interval, - self._poll_task, task, deferred) + return elif status == 'success': result = self._conn.xenapi.task.get_result(task) logging.info('Task %s status: success. %s', task, result) - deferred.callback(_parse_xmlrpc_value(result)) + done.send(_parse_xmlrpc_value(result)) else: error_info = self._conn.xenapi.task.get_error_info(task) logging.warn('Task %s status: %s. %s', task, status, error_info) - deferred.errback(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + done.send_exception(XenAPI.Failure(error_info)) except Exception, exc: logging.warn(exc) - deferred.errback(exc) + done.send_exception(*sys.exc_info()) - @utils.deferredToThread def _call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns a Deferred for the result.""" @@ -353,11 +339,10 @@ class XenAPIConnection(object): f = f.__getattr__(m) return f(*args) - @utils.deferredToThread def _async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread. 
Returns a Deferred with the task reference.""" - return _unwrap_plugin_exceptions( + return tpool.execute(_unwrap_plugin_exceptions, self._conn.xenapi.Async.host.call_plugin, self._get_xenapi_host(), plugin, fn, args) -- cgit From 9ca0b3435d93a87407ca42a853562cd06aaa896e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 22 Nov 2010 12:57:03 +0000 Subject: added placeholders --- nova/virt/xenapi.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 0f563aa41..4ed5d047f 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -296,6 +296,14 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + + @defer.inlineCallbacks + def attach_volume(self, instance_name, device_path, mountpoint): + return True + + @defer.inlineCallbacks + def detach_volume(self, instance_name, mountpoint): + return True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) -- cgit From 9f722a0bcdb987c228f4ebf1e42c904a26d0ef73 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 10:42:06 +0000 Subject: first cut of changes for the attach_volume call --- nova/virt/xenapi.py | 94 +++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 7 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 4ed5d047f..ec5e7456a 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -63,6 +63,9 @@ from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from xml.dom.minidom import parseString + + XenAPI = None @@ -90,7 +93,7 @@ XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, 'Paused': power_state.PAUSED, - 'Suspended': power_state.SHUTDOWN, # FIXME + 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} @@ -126,7 +129,7 @@ class XenAPIConnection(object): def spawn(self, instance): vm = yield self._lookup(instance.name) if vm is not None: - raise Exception('Attempted to create non-unique name %s' % + raise Exception('Attempted to create non-unique name %s' % instance.name) network = db.project_get_network(None, instance.project_id) @@ -296,14 +299,34 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - + @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): - return True + # NOTE: No Resource Pool concept so far + logging.debug("Attach_volume: %s, %s, %s", + instance_name, device_path, mountpoint) + volume_info = _parse_volume_info(device_path, mountpoint) + # Create the iSCSI SR, and the PDB through which hosts access SRs. + # But first, retrieve target info, like Host, IQN, LUN and SCSIID + target = yield self._get_target(volume_info) + label = 'SR-%s' % volume_info['volumeId'] + sr_ref = yield self._create_sr(target, label) + # Create VDI and attach VBD to VM + vm = None + try: + task = yield self._call_xenapi('', vm) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - return True + logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Detach VBD from VM + # Forget SR/PDB info associated with host + # TODO: can we avoid destroying the SR every time we detach? 
+ yield True def get_info(self, instance_id): vm = self._lookup_blocking(instance_id) @@ -333,6 +356,52 @@ class XenAPIConnection(object): else: return vms[0] + @utils.deferredToThread + def _get_target(self, volume_info): + return self._get_target_blocking(volume_info) + + def _get_target_blocking(self, volume_info): + target = {} + target['target'] = volume_info['targetHost'] + target['port'] = volume_info['targetPort'] + target['targetIQN'] = volume_info['iqn'] + # We expect SR_BACKEND_FAILURE_107 to retrieve params to create the SR + try: + self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, '-1', '', '', + 'lvmoiscsi', '', False, {}) + except XenAPI.Failure, exc: + if exc.details[0] == 'SR_BACKEND_FAILURE_107': + xml_response = parseString(exc.details[3]) + isciTargets = xml_response.getElementsByTagName('iscsi-target') + # Make sure that only the correct Lun is visible + if len(isciTargets) > 1: + raise Exception('More than one ISCSI Target available') + isciLuns = isciTargets.item(0).getElementsByTagName('LUN') + if len(isciLuns) > 1: + raise Exception('More than one ISCSI Lun available') + # Parse params from the xml response into the dictionary + for n in isciLuns.item(0).childNodes: + if n.nodeType == 1: + target[n.nodeName] = str(n.firstChild.data).strip() + return target + + @utils.deferredToThread + def _create_sr(self, target, label): + return self._create_sr_blocking(target, label) + + def _create_sr_blocking(self, target, label): + # TODO: we might want to put all these string literals into constants + sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + target, + target['size'], + label, + '', + 'lvmoiscsi', + '', + True, {}) + return sr + def _wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" @@ -412,7 +481,18 @@ def _parse_xmlrpc_value(val): if not val: return val x = xmlrpclib.loads( - '' + - val + + '' + + val + '') return x[0][0] + + +def _parse_volume_info(device_path, mountpoint): + volume_info = {} + volume_info['volumeId'] = 'vol-qurmrzn9' + # XCP and XenServer add an x to the device name + volume_info['xenMountpoint'] = '/dev/xvdb' + volume_info['targetHost'] = '10.70.177.40' + volume_info['targetPort'] = '3260' # default 3260 + volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' + return volume_info -- cgit From 688d564668aefa4b644236421a3a45fc90486634 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:31:32 +0000 Subject: work on attach_volume, with a few things to iron out --- nova/virt/xenapi.py | 99 ++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 16 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 5bf98468e..d3167ebf3 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -147,7 +147,7 @@ class XenAPIConnection(object): vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid) vm_ref = yield self._create_vm(instance, kernel, ramdisk) - yield self._create_vbd(vm_ref, vdi_ref, 0, True) + yield self._create_vbd(vm_ref, vdi_ref, 0, True, True, False) if network_ref: yield self._create_vif(vm_ref, network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) @@ -197,7 +197,21 @@ class XenAPIConnection(object): defer.returnValue(vm_ref) @defer.inlineCallbacks - def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable): + def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + vdi_rec = {} + vdi_rec['read_only'] = read_only + vdi_rec['SR'] = sr_ref + vdi_rec['virtual_size'] = str(size) + vdi_rec['name_label'] = label + vdi_rec['name_description'] = description + vdi_rec['sharable'] = sharable + vdi_rec['type'] = type + vdi_rec['other_config'] = {} + vdi_ref = yield self._call_xenapi('VDI.create', vdi_rec) + defer.returnValue(vdi_ref) + + @defer.inlineCallbacks + def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable, unpluggable, empty): """Create a VBD record. 
Returns a Deferred that gives the new VBD reference.""" @@ -208,8 +222,8 @@ class XenAPIConnection(object): vbd_rec['bootable'] = bootable vbd_rec['mode'] = 'RW' vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False + vbd_rec['unpluggable'] = unpluggable + vbd_rec['empty'] = empty vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} @@ -320,14 +334,33 @@ class XenAPIConnection(object): # But first, retrieve target info, like Host, IQN, LUN and SCSIID target = yield self._get_target(volume_info) label = 'SR-%s' % volume_info['volumeId'] - sr_ref = yield self._create_sr(target, label) + description = 'Attached-to:%s' % instance_name + # Create SR and check the physical space available for the VDI allocation + sr_ref = yield self._create_sr(target, label, description) + disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM - vm = None + vm_ref = yield self._lookup(instance_name) + logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) try: - task = yield self._call_xenapi('', vm) - yield self._wait_for_task(task) + vdi_ref = yield self._create_vdi(sr_ref, disk_size, + 'user', volume_info['volumeId'], '', + False, False) except Exception, exc: logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s' % sr_ref) + else: + try: + userdevice = 2 # FIXME: this depends on the numbers of attached disks + vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s' % sr_ref) yield True @defer.inlineCallbacks @@ -395,23 +428,57 @@ class XenAPIConnection(object): if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() return target + + @utils.deferredToThread + def _get_sr_available_space(self, sr_ref): + return self._get_sr_available_space_blocking(sr_ref) + + def _get_sr_available_space_blocking(self, sr_ref): + pu = self._conn.xenapi.SR.get_physical_utilisation(sr_ref) + ps = self._conn.xenapi.SR.get_physical_size(sr_ref) + return (int(ps) - int(pu)) - (8 * 1024 * 1024) @utils.deferredToThread - def _create_sr(self, target, label): - return self._create_sr_blocking(target, label) + def _create_sr(self, target, label, description): + return self._create_sr_blocking(target, label, description) - def _create_sr_blocking(self, target, label): + def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, - '', + description, 'lvmoiscsi', '', True, {}) - return sr + # TODO: there might be some timing issues here + self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + @defer.inlineCallbacks + def _destroy_sr(self, sr_ref): + # Some clean-up depending on the state of the SR + #yield self._destroy_vdbs(sr_ref) + #yield self._destroy_vdis(sr_ref) + # Destroy PDBs + pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) + for pbd_ref in pbds: + try: + task = yield self._call_xenapi('Async.PBD.unplug', pbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + else: + task = yield 
self._call_xenapi('Async.PBD.destroy', pbd_ref) + yield self._wait_for_task(task) + # Forget SR + try: + task = yield self._call_xenapi('Async.SR.forget', sr_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -524,8 +591,8 @@ def _parse_xmlrpc_value(val): def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' - # XCP and XenServer add an x to the device name - volume_info['xenMountpoint'] = '/dev/xvdb' + # Because XCP/XS want an x beforehand + volume_info['xenMountpoint'] = '/dev/xvdc' volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' -- cgit From a9b900d24020b68284e402a98ee28c107de0bd71 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 25 Nov 2010 20:42:22 +0000 Subject: added attach_volume implementation --- nova/virt/xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index d3167ebf3..236360f11 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -593,7 +593,7 @@ def _parse_volume_info(device_path, mountpoint): volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '10.70.177.40' + volume_info['targetHost'] = '' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 04b1740c991d6d499364c21c2524c46ed5fc2522 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 29 Nov 2010 17:26:44 +0000 Subject: changes --- nova/virt/xenapi.py | 93 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 25 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py index 236360f11..f2ba71306 100644 --- a/nova/virt/xenapi.py +++ b/nova/virt/xenapi.py @@ -198,6 +198,9 @@ class XenAPIConnection(object): @defer.inlineCallbacks def _create_vdi(self, sr_ref, size, type, label, description, read_only, sharable): + """Create a VDI record. 
Returns a Deferred that gives the new + VDI reference.""" + vdi_rec = {} vdi_rec['read_only'] = read_only vdi_rec['SR'] = sr_ref @@ -337,7 +340,8 @@ class XenAPIConnection(object): description = 'Attached-to:%s' % instance_name # Create SR and check the physical space available for the VDI allocation sr_ref = yield self._create_sr(target, label, description) - disk_size = yield self._get_sr_available_space(sr_ref) + disk_size = int(target['size']) + #disk_size = yield self._get_sr_available_space(sr_ref) # Create VDI and attach VBD to VM vm_ref = yield self._lookup(instance_name) logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) @@ -347,20 +351,30 @@ class XenAPIConnection(object): False, False) except Exception, exc: logging.warn(exc) - if sr_ref: - yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VDI on SR %s' % sr_ref) + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VDI on SR %s for instance %s' + % (sr_ref, + instance_name)) else: - try: + try: userdevice = 2 # FIXME: this depends on the numbers of attached disks vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - if sr_ref: + yield self._destroy_sr(sr_ref) + raise Exception('Unable to create VBD on SR %s for instance %s' + % (sr_ref, + instance_name)) + else: + try: + raise Exception('') + task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) yield self._destroy_sr(sr_ref) - raise Exception('Unable to create VBD on SR %s' % sr_ref) + raise Exception('Unable to attach volume to instance %s' % instance_name) + yield True @defer.inlineCallbacks @@ -412,7 +426,7 @@ class XenAPIConnection(object): try: self._conn.xenapi.SR.create(self._get_xenapi_host(), target, '-1', '', '', - 'lvmoiscsi', '', False, {}) + 'iscsi', '', False, {}) except XenAPI.Failure, exc: if exc.details[0] == 'SR_BACKEND_FAILURE_107': xml_response = parseString(exc.details[3]) @@ -427,6 +441,9 @@ class XenAPIConnection(object): for n in isciLuns.item(0).childNodes: if n.nodeType == 1: target[n.nodeName] = str(n.firstChild.data).strip() + else: + logging.warn(exc) + raise Exception('Unable to access SR') return target @utils.deferredToThread @@ -444,24 +461,45 @@ class XenAPIConnection(object): def _create_sr_blocking(self, target, label, description): # TODO: we might want to put all these string literals into constants - sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), + sr_ref = self._conn.xenapi.SR.get_by_name_label(label) + if sr_ref is None: + sr_ref = self._conn.xenapi.SR.create(self._get_xenapi_host(), target, target['size'], label, description, - 'lvmoiscsi', + 'iscsi', '', True, {}) - # TODO: there might be some timing issues here - self._conn.xenapi.SR.scan(sr_ref) - return sr_ref + if sr_ref: + #self._conn.xenapi.SR.scan(sr_ref) + return sr_ref + else: + raise Exception('Unable to create SR') @defer.inlineCallbacks def _destroy_sr(self, sr_ref): # Some clean-up depending on the state of the SR - #yield self._destroy_vdbs(sr_ref) - #yield self._destroy_vdis(sr_ref) - # Destroy PDBs + # Remove VBDs + #vbds = yield self._conn.xenapi.SR.get_VBDs(sr_ref) + #for vbd_ref in vbds: + # try: + # task = yield self._call_xenapi('Async.VBD.destroy', vbd_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + # 
Remove VDIs + #======================================================================= + # vdis = yield self._conn.xenapi.SR.get_VDIs(sr_ref) + # for vdi_ref in vdis: + # try: + # task = yield self._call_xenapi('Async.VDI.destroy', vdi_ref) + # yield self._wait_for_task(task) + # except Exception, exc: + # logging.warn(exc) + #======================================================================= + sr_rec = self._conn.xenapi.SR.get_record(sr_ref) + # Detach from host pbds = yield self._conn.xenapi.SR.get_PBDs(sr_ref) for pbd_ref in pbds: try: @@ -469,16 +507,21 @@ class XenAPIConnection(object): yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) - else: - task = yield self._call_xenapi('Async.PBD.destroy', pbd_ref) - yield self._wait_for_task(task) - # Forget SR + # Destroy SR try: - task = yield self._call_xenapi('Async.SR.forget', sr_ref) + task = yield self._call_xenapi('Async.SR.destroy', sr_ref) yield self._wait_for_task(task) except Exception, exc: logging.warn(exc) + def _sr_dispose_action(self, action, records): + for rec_ref in records: + try: + task = yield self._call_xenapi(action, rec_ref) + yield self._wait_for_task(task) + except Exception, exc: + logging.warn(exc) + @utils.deferredToThread def _lookup_vm_vdis(self, vm): return self._lookup_vm_vdis_blocking(vm) @@ -592,8 +635,8 @@ def _parse_volume_info(device_path, mountpoint): volume_info = {} volume_info['volumeId'] = 'vol-qurmrzn9' # Because XCP/XS want an x beforehand - volume_info['xenMountpoint'] = '/dev/xvdc' - volume_info['targetHost'] = '' + volume_info['mountpoint'] = '/dev/xvdc' # translate + volume_info['targetHost'] = '10.70.177.40' volume_info['targetPort'] = '3260' # default 3260 volume_info['iqn'] = 'iqn.2010-10.org.openstack:vol-qurmrzn9' return volume_info -- cgit From 40de074f44059f89caa15420a7174f63c76eec48 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 30 Nov 2010 19:03:13 +0000 Subject: iscsi volumes attach/detach complete. 
There is only one minor issue on how to discover targets from device_path --- nova/virt/xenapi/vm_utils.py | 38 +++++++ nova/virt/xenapi/volume_utils.py | 210 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volumeops.py | 93 +++++++++++------ 3 files changed, 313 insertions(+), 28 deletions(-) create mode 100644 nova/virt/xenapi/volume_utils.py (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b68df2791..6966e7b7b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -103,6 +103,44 @@ class VMHelper(): vdi_ref) defer.returnValue(vbd_ref) + @classmethod + @utils.deferredToThread + def find_vbd_by_number(self, session, vm_ref, number): + return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) + + @classmethod + def find_vbd_by_number_blocking(self, session, vm_ref, number): + vbds = session.get_xenapi().VM.get_VBDs(vm_ref) + if vbds: + for vbd in vbds: + try: + vbd_rec = session.get_xenapi().VBD.get_record(vbd) + if vbd_rec['userdevice'] == str(number): + return vbd + except Exception, exc: + logging.warn(exc) + raise Exception('VBD not found in instance %s' % vm_ref) + + @classmethod + @defer.inlineCallbacks + def unplug_vbd(self, session, vbd_ref): + try: + vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) + except Exception, exc: + logging.warn(exc) + if exc.details[0] != 'DEVICE_ALREADY_DETACHED': + raise Exception('Unable to unplug VBD %s' % vbd_ref) + + @classmethod + @defer.inlineCallbacks + def destroy_vbd(self, session, vbd_ref): + try: + task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) + yield session.wait_for_task(task) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to destroy VBD %s' % vbd_ref) + @classmethod @defer.inlineCallbacks def create_vif(self, session, vm_ref, network_ref, mac_address): diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py new file mode 100644 index 000000000..b982ac124 --- /dev/null +++ b/nova/virt/xenapi/volume_utils.py @@ -0,0 +1,210 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Helper methods for operations related to the management of volumes, +and storage repositories +""" + +import logging +import re +import string + +from twisted.internet import defer + +from nova import utils +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + + +class VolumeHelper(): + def __init__(self, session): + return + + @classmethod + @utils.deferredToThread + def create_iscsi_storage(self, session, target, port, target_iqn, + username, password, label, description): + + return VolumeHelper.create_iscsi_storage_blocking(session, target, + port, + target_iqn, + username, + password, + label, + description) + + @classmethod + def create_iscsi_storage_blocking(self, session, target, port, target_iqn, + username, password, label, description): + + sr_ref = session.get_xenapi().SR.get_by_name_label(label) + if len(sr_ref) == 0: + logging.debug('Introducing %s...' % label) + try: + sr_ref = session.get_xenapi().SR.create( + session.get_xenapi_host(), + {'target': target, + 'port': port, + 'targetIQN': target_iqn + # TODO: when/if chap authentication is used + #'chapuser': username, + #'chappassword': password + }, + '0', label, description, 'iscsi', '', False, {}) + logging.debug('Introduced %s as %s.' % (label, sr_ref)) + return sr_ref + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to create Storage Repository') + else: + return sr_ref[0] + + @classmethod + @defer.inlineCallbacks + def find_sr_from_vbd(self, session, vbd_ref): + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + + @classmethod + @utils.deferredToThread + def destroy_iscsi_storage(self, session, sr_ref): + VolumeHelper.destroy_iscsi_storage_blocking(session, sr_ref) + + @classmethod + def destroy_iscsi_storage_blocking(self, session, sr_ref): + logging.debug("Forgetting SR %s ... 
", sr_ref) + pbds = [] + try: + pbds = session.get_xenapi().SR.get_PBDs(sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when getting PBDs for %s', + exc, sr_ref) + for pbd in pbds: + try: + session.get_xenapi().PBD.unplug(pbd) + except Exception, exc: + logging.warn('Ignoring exception %s when unplugging PBD %s', + exc, pbd) + try: + session.get_xenapi().SR.forget(sr_ref) + logging.debug("Forgetting SR %s done.", sr_ref) + except Exception, exc: + logging.warn('Ignoring exception %s when forgetting SR %s', + exc, sr_ref) + + @classmethod + @utils.deferredToThread + def introduce_vdi(self, session, sr_ref): + return VolumeHelper.introduce_vdi_blocking(session, sr_ref) + + @classmethod + def introduce_vdi_blocking(self, session, sr_ref): + try: + vdis = session.get_xenapi().SR.get_VDIs(sr_ref) + except Exception, exc: + raise Exception('Unable to introduce VDI on SR %s' % sr_ref) + try: + vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) + except Exception, exc: + raise Exception('Unable to get record of VDI %s on' % vdis[0]) + else: + return session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + volume_id = _get_volume_id(device_path) + target_host = _get_target_host(device_path) + target_port = _get_target_port(device_path) + target_iqn = _get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + +def _get_volume_id(n): + # FIXME: n must contain at least the volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + +def _get_target_host(n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + +def _get_target_port(n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + +def _get_iqn(n): + # FIXME: n must contain at least the volume_id + volume_id = _get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5aefa0611..d5c309240 100644 --- 
a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,6 +17,12 @@ """ Management class for Storage-related functions (attach, detach, etc). """ +import logging + +from twisted.internet import defer + +from volume_utils import VolumeHelper +from vm_utils import VMHelper class VolumeOps(object): @@ -25,58 +31,89 @@ class VolumeOps(object): @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): + # Before we start, check that the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - volume_info = _parse_volume_info(device_path, mountpoint) + vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID - target = yield self._get_target(volume_info) - label = 'SR-%s' % volume_info['volumeId'] - description = 'Attached-to:%s' % instance_name - # Create SR and check the physical space available for the VDI allocation - sr_ref = yield self._create_sr(target, label, description) - disk_size = int(target['size']) - #disk_size = yield self._get_sr_available_space(sr_ref) - # Create VDI and attach VBD to VM - vm_ref = yield self._lookup(instance_name) - logging.debug("Mounting disk of: %s GB", (disk_size / (1024*1024*1024.0))) + label = 'SR-%s' % vol_rec['volumeId'] + description = 'Disk-for:%s' % instance_name + # Create SR + sr_ref = yield VolumeHelper.create_iscsi_storage(self._session, + vol_rec['targetHost'], + vol_rec['targetPort'], + vol_rec['targeIQN'], + '', # no CHAP auth + '', + label, + description) + # Introduce VDI and attach VBD to VM try: - vdi_ref = yield self._create_vdi(sr_ref, disk_size, - 'user', volume_info['volumeId'], '', - False, False) + vdi_ref = yield VolumeHelper.introduce_vdi(self._session, sr_ref) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VDI on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - userdevice = 2 # FIXME: this depends on the numbers of attached disks - vbd_ref = yield self._create_vbd(vm_ref, vdi_ref, userdevice, False, True, False) + vbd_ref = yield VMHelper.create_vbd(self._session, + vm_ref, vdi_ref, + vol_rec['deviceNumber'], + False) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception('Unable to create VBD on SR %s for instance %s' % (sr_ref, instance_name)) else: try: - raise Exception('') - task = yield self._call_xenapi('Async.VBD.plug', vbd_ref) - yield self._wait_for_task(task) + #raise Exception('') + task = yield self._session.call_xenapi('Async.VBD.plug', + vbd_ref) + yield self._session.wait_for_task(task) except Exception, exc: logging.warn(exc) - yield self._destroy_sr(sr_ref) - raise Exception('Unable to attach volume to instance %s' % instance_name) - + yield VolumeHelper.destroy_iscsi_storage(self._session, + sr_ref) + raise Exception('Unable to attach volume to instance %s' % + instance_name) yield True @defer.inlineCallbacks def detach_volume(self, instance_name, mountpoint): - logging.debug("Detach_volume: %s, %s, %s", instance_name, mountpoint) + # Before we start, check that 
the VM exists + vm_ref = yield VMHelper.lookup(self._session, instance_name) + if vm_ref is None: + raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM - # Forget SR/PDB info associated with host - # TODO: can we avoid destroying the SR every time we detach? - yield True \ No newline at end of file + logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) + device_number = VolumeHelper.mountpoint_to_number(mountpoint) + try: + vbd_ref = yield VMHelper.find_vbd_by_number(self._session, + vm_ref, device_number) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to locate volume %s' % mountpoint) + else: + try: + sr_ref = yield VolumeHelper.find_sr_from_vbd(self._session, + vbd_ref) + yield VMHelper.unplug_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + raise Exception('Unable to detach volume %s' % mountpoint) + try: + yield VMHelper.destroy_vbd(self._session, vbd_ref) + except Exception, exc: + logging.warn(exc) + # Forget SR + yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) + yield True -- cgit From 4b74a1b243d87d53e660029728d12a9c067deeac Mon Sep 17 00:00:00 2001 From: Rick Clark Date: Tue, 30 Nov 2010 13:08:39 -0600 Subject: Cleaned up pep8 errors --- nova/virt/libvirt_conn.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 09cb29773..81dbbaad5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -467,13 +467,13 @@ class LibvirtConnection(object): process_input=process_input, check_exit_code=check_exit_code) - # For now, we assume that if we're not using a kernel, we're using a - # partitioned disk image where the target partition is the first + # For now, we assume that if we're not using a kernel, we're using a + # partitioned disk image where the target partition is the first # partition target_partition = None if not using_kernel: target_partition = "1" - + key = str(inst['key_data']) net = None network_ref = db.network_get_by_instance(context.get_admin_context(), @@ -493,11 +493,10 @@ class LibvirtConnection(object): inst['name'], inst.image_id) if net: logging.info('instance %s: injecting net into image %s', - inst['name'], inst.image_id) - execute=execute) + inst['name'], inst.image_id) try: yield disk.inject_data(basepath('disk-raw'), key, net, - partition=target_partition, + partition=target_partition, execute=execute) except Exception as e: # This could be a windows image, or a vmdk format disk @@ -557,7 +556,7 @@ class LibvirtConnection(object): if xml_info['ramdisk_id'] or xml_info['kernel_id']: xml_info['disk'] = xml_info['basepath'] + "/disk" - xml = str(Template(self.libvirt_xml, searchList=[ xml_info ] )) + xml = str(Template(self.libvirt_xml, searchList=[xml_info])) logging.debug('instance %s: finished toXML method', instance['name']) return xml -- cgit From f26489ef1ad2a7df0e9f72a8c9ad4f2e3a65ae57 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 1 Dec 2010 12:02:02 +0000 Subject: minor refactoring --- nova/virt/xenapi/novadeps.py | 82 ++++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/volume_utils.py | 77 ------------------------------------- nova/virt/xenapi/volumeops.py | 6 ++- 3 files changed, 86 insertions(+), 79 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index ba62468fb..db51982a6 100644 --- a/nova/virt/xenapi/novadeps.py +++ 
b/nova/virt/xenapi/novadeps.py @@ -14,6 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. +import re +import string + from nova import db from nova import flags from nova import process @@ -32,6 +35,15 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} +from nova import flags + +FLAGS = flags.FLAGS + +#FIXME: replace with proper target discovery +flags.DEFINE_string('target_host', None, 'iSCSI Target Host') +flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') + class Instance(object): @@ -101,3 +113,73 @@ class User(object): @classmethod def get_secret(self, user): return user.secret + + +class Volume(object): + + @classmethod + def parse_volume_info(self, device_path, mountpoint): + # Because XCP/XS want a device number instead of a mountpoint + device_number = Volume.mountpoint_to_number(mountpoint) + volume_id = Volume.get_volume_id(device_path) + target_host = Volume.get_target_host(device_path) + target_port = Volume.get_target_port(device_path) + target_iqn = Volume.get_iqn(device_path) + + if (device_number < 0) or \ + (volume_id is None) or \ + (target_host is None) or \ + (target_iqn is None): + raise Exception('Unable to obtain target information %s, %s' % + (device_path, mountpoint)) + + volume_info = {} + volume_info['deviceNumber'] = device_number + volume_info['volumeId'] = volume_id + volume_info['targetHost'] = target_host + volume_info['targetPort'] = target_port + volume_info['targeIQN'] = target_iqn + return volume_info + + @classmethod + def mountpoint_to_number(self, mountpoint): + if mountpoint.startswith('/dev/'): + mountpoint = mountpoint[5:] + if re.match('^[hs]d[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^vd[a-p]$', mountpoint): + return (ord(mountpoint[2:3]) - ord('a')) + elif re.match('^[0-9]+$', mountpoint): + return string.atoi(mountpoint, 10) + else: + logging.warn('Mountpoint cannot be translated: %s', mountpoint) + return -1 + + @classmethod + def get_volume_id(self, n): + # FIXME: n must contain at least the volume_id + # /vol- is for remote volumes + # -vol- is for local volumes + # see compute/manager->setup_compute_volume + volume_id = n[n.find('/vol-') + 1:] + if volume_id == n: + volume_id = n[n.find('-vol-') + 1:].replace('--', '-') + return volume_id + + @classmethod + def get_target_host(self, n): + # FIXME: if n is none fall back on flags + if n is None or FLAGS.target_host: + return FLAGS.target_host + + @classmethod + def get_target_port(self, n): + # FIXME: if n is none fall back on flags + return FLAGS.target_port + + @classmethod + def get_iqn(self, n): + # FIXME: n must contain at least the volume_id + volume_id = Volume.get_volume_id(n) + if n is None or FLAGS.iqn_prefix: + return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index b982ac124..3b3e8894c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -20,20 +20,10 @@ and storage repositories """ import logging -import re -import string from twisted.internet import defer from nova import utils -from nova import flags - -FLAGS = flags.FLAGS - -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') 
-flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') class VolumeHelper(): @@ -141,70 +131,3 @@ class VolumeHelper(): vdi_rec['location'], vdi_rec['xenstore_data'], vdi_rec['sm_config']) - - @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint - device_number = VolumeHelper.mountpoint_to_number(mountpoint) - volume_id = _get_volume_id(device_path) - target_host = _get_target_host(device_path) - target_port = _get_target_port(device_path) - target_iqn = _get_iqn(device_path) - - if (device_number < 0) or \ - (volume_id is None) or \ - (target_host is None) or \ - (target_iqn is None): - raise Exception('Unable to obtain target information %s, %s' % - (device_path, mountpoint)) - - volume_info = {} - volume_info['deviceNumber'] = device_number - volume_info['volumeId'] = volume_id - volume_info['targetHost'] = target_host - volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn - return volume_info - - @classmethod - def mountpoint_to_number(self, mountpoint): - if mountpoint.startswith('/dev/'): - mountpoint = mountpoint[5:] - if re.match('^[hs]d[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^vd[a-p]$', mountpoint): - return (ord(mountpoint[2:3]) - ord('a')) - elif re.match('^[0-9]+$', mountpoint): - return string.atoi(mountpoint, 10) - else: - logging.warn('Mountpoint cannot be translated: %s', mountpoint) - return -1 - - -def _get_volume_id(n): - # FIXME: n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes - # see compute/manager->setup_compute_volume - volume_id = n[n.find('/vol-') + 1:] - if volume_id == n: - volume_id = n[n.find('-vol-') + 1:].replace('--', '-') - return volume_id - - -def _get_target_host(n): - # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host - - -def _get_target_port(n): - # FIXME: if n is none fall back on flags - return FLAGS.target_port - - -def _get_iqn(n): - # FIXME: n must contain at least the volume_id - volume_id = _get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % (FLAGS.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index d5c309240..ec4343329 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -24,6 +24,8 @@ from twisted.internet import defer from volume_utils import VolumeHelper from vm_utils import VMHelper +from novadeps import Volume + class VolumeOps(object): def __init__(self, session): @@ -38,9 +40,9 @@ class VolumeOps(object): # NOTE: No Resource Pool concept so far logging.debug("Attach_volume: %s, %s, %s", instance_name, device_path, mountpoint) - vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. 
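# Aside: a minimal, self-contained sketch of the mountpoint-to-device-number
# translation that attach_volume depends on (XenAPI VBDs take a numeric
# 'userdevice' rather than a Linux device path). It mirrors the
# mountpoint_to_number helper introduced above; the standalone function name
# and the usage comment at the end are illustrative, not code from Nova.
import re

def mountpoint_to_device_number(mountpoint):
    """Map '/dev/sdc', 'vdb' or '3' to the numeric device XenAPI expects."""
    if mountpoint.startswith('/dev/'):
        mountpoint = mountpoint[len('/dev/'):]
    match = re.match('^[hsv]d([a-p])$', mountpoint)
    if match:
        return ord(match.group(1)) - ord('a')
    if mountpoint.isdigit():
        return int(mountpoint)
    return -1

# mountpoint_to_device_number('/dev/sdc') == 2, so the volume shows up as
# the third virtual block device on the guest.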
# But first, retrieve target info, like Host, IQN, LUN and SCSIID + vol_rec = Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR @@ -95,7 +97,7 @@ class VolumeOps(object): raise Exception('Instance %s does not exist' % instance_name) # Detach VBD from VM logging.debug("Detach_volume: %s, %s", instance_name, mountpoint) - device_number = VolumeHelper.mountpoint_to_number(mountpoint) + device_number = Volume.mountpoint_to_number(mountpoint) try: vbd_ref = yield VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) -- cgit From e4cfd7f3fe7d3c50d65c61abf21bf998fde85147 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 2 Dec 2010 14:09:23 +0000 Subject: minor refactoring after merge --- nova/virt/xenapi/novadeps.py | 28 ++++++++++++++++++++-------- nova/virt/xenapi_conn.py | 5 ++++- 2 files changed, 24 insertions(+), 9 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index b4802764e..aa3535162 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -36,8 +36,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} -FLAGS = flags.FLAGS - flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' @@ -81,6 +79,21 @@ class Configuration(object): def xenapi_task_poll_interval(self): return self._flags.xenapi_task_poll_interval + @property + def target_host(self): + return self._flags.target_host + + @property + def target_port(self): + return self._flags.target_port + + @property + def iqn_prefix(self): + return self._flags.iqn_prefix + + +config = Configuration() + class Instance(object): @@ -206,18 +219,17 @@ class Volume(object): @classmethod def get_target_host(self, n): # FIXME: if n is none fall back on flags - if n is None or FLAGS.target_host: - return FLAGS.target_host + if n is None or config.target_host: + return config.target_host @classmethod def get_target_port(self, n): # FIXME: if n is none fall back on flags - return FLAGS.target_port + return config.target_port @classmethod def get_iqn(self, n): # FIXME: n must contain at least the volume_id volume_id = Volume.get_volume_id(n) - if n is None or FLAGS.iqn_prefix: - return '%s:%s' % (FLAGS.iqn_prefix, volume_id) - + if n is None or config.iqn_prefix: + return '%s:%s' % (config.iqn_prefix, volume_id) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 948fade7e..d3f66b12c 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -44,7 +44,10 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :xenapi_task_poll_interval: The interval (seconds) used for polling of remote tasks (Async.VM.start, etc) (default: 0.5). - +:target_host: the iSCSI Target Host IP address, i.e. the IP + address for the nova-volume host +:target_port: iSCSI Target Port, 3260 Default +:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack' """ import logging -- cgit From ee71c0accbb540bcb9d08cdcdc8b659f29a0edd6 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:06:32 +0000 Subject: added interim solution for target discovery. Now info can either be passed via flags or discovered via iscsiadm. 
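(For illustration, the sendtargets output this relies on has the form '<ip>:<port>,<tpgt> <iqn>', one target per line, so the discovery step reduces to a small parsing helper. The sketch below is hedged: the function name is invented and it takes the captured command output as a string instead of shelling out via sudo, but the partition/split logic matches the get_target code in this patch.)

def parse_sendtargets_output(output, volume_id):
    """Return (iscsi_name, iscsi_portal) for volume_id, or (None, None).

    Each line of 'iscsiadm -m discovery -t sendtargets -p <host>' looks
    like '192.168.1.10:3260,1 iqn.2010-10.org.openstack:volume-00000001'.
    """
    for line in output.splitlines():
        if volume_id not in line:
            continue
        location, _sep, iscsi_name = line.partition(' ')
        iscsi_portal = location.split(',')[0]
        return (iscsi_name, iscsi_portal)
    return (None, None)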
Long term solution is to add a few more fields to the db in the iscsi_target table with the necessary info and modify the iscsi driver to set them --- nova/virt/xenapi/novadeps.py | 89 ++++++++++++++++++++++++++++++++++--------- nova/virt/xenapi/volumeops.py | 2 +- 2 files changed, 71 insertions(+), 20 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 65576019e..66c8233b8 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -27,6 +27,9 @@ import string from nova import db from nova import flags from nova import context +from nova import process + +from twisted.internet import defer from nova.compute import power_state from nova.auth.manager import AuthManager @@ -193,15 +196,28 @@ class User(object): class Volume(object): + """ Wraps up volume specifics """ @classmethod - def parse_volume_info(self, device_path, mountpoint): - # Because XCP/XS want a device number instead of a mountpoint + @defer.inlineCallbacks + def parse_volume_info(cls, device_path, mountpoint): + """ + Parse device_path and mountpoint as they can be used by XenAPI. + In particular, the mountpoint (e.g. /dev/sdc) must be translated + into a numeric literal. + FIXME: As for device_path, currently cannot be used as it is, + because it does not contain target information. As for interim + solution, target details are passed either via Flags or obtained + by iscsiadm. Long-term solution is to add a few more fields to the + db in the iscsi_target table with the necessary info and modify + the iscsi driver to set them. + """ device_number = Volume.mountpoint_to_number(mountpoint) volume_id = Volume.get_volume_id(device_path) - target_host = Volume.get_target_host(device_path) - target_port = Volume.get_target_port(device_path) - target_iqn = Volume.get_iqn(device_path) + (iscsi_name, iscsi_portal) = yield Volume.get_target(volume_id) + target_host = Volume.get_target_host(iscsi_portal) + target_port = Volume.get_target_port(iscsi_portal) + target_iqn = Volume.get_iqn(iscsi_name, volume_id) if (device_number < 0) or \ (volume_id is None) or \ @@ -216,10 +232,11 @@ class Volume(object): volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port volume_info['targeIQN'] = target_iqn - return volume_info + defer.returnValue(volume_info) @classmethod - def mountpoint_to_number(self, mountpoint): + def mountpoint_to_number(cls, mountpoint): + """ Translate a mountpoint like /dev/sdc into a numberic """ if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] if re.match('^[hs]d[a-p]$', mountpoint): @@ -233,8 +250,9 @@ class Volume(object): return -1 @classmethod - def get_volume_id(self, n): - # FIXME: n must contain at least the volume_id + def get_volume_id(cls, n): + """ Retrieve the volume id from device_path """ + # n must contain at least the volume_id # /vol- is for remote volumes # -vol- is for local volumes # see compute/manager->setup_compute_volume @@ -244,19 +262,52 @@ class Volume(object): return volume_id @classmethod - def get_target_host(self, n): - # FIXME: if n is none fall back on flags - if n is None or config.target_host: + def get_target_host(cls, n): + """ Retrieve target host """ + if n: + return n[0:n.find(':')] + elif n is None or config.target_host: return config.target_host @classmethod - def get_target_port(self, n): - # FIXME: if n is none fall back on flags - return config.target_port + def get_target_port(cls, n): + """ Retrieve target port """ + if n: + return n[n.find(':') + 1:] 
+ elif n is None or config.target_port: + return config.target_port @classmethod - def get_iqn(self, n): - # FIXME: n must contain at least the volume_id - volume_id = Volume.get_volume_id(n) - if n is None or config.iqn_prefix: + def get_iqn(cls, n, id): + """ Retrieve target IQN """ + if n: + return n + elif n is None or config.iqn_prefix: + volume_id = Volume.get_volume_id(id) return '%s:%s' % (config.iqn_prefix, volume_id) + + @classmethod + @defer.inlineCallbacks + def get_target(self, volume_id): + """ + Gets iscsi name and portal from volume name and host. + For this method to work the following are needed: + 1) volume_ref['host'] to resolve the public IP address + 2) ietd to listen only to the public network interface + If any of the two are missing, fall back on Flags + """ + volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), + volume_id) + + (r, _e) = yield process.simple_execute("sudo iscsiadm -m discovery -t " + "sendtargets -p %s" % + volume_ref['host']) + if len(_e) == 0: + for target in r.splitlines(): + if volume_id in target: + (location, _sep, iscsi_name) = target.partition(" ") + break + iscsi_portal = location.split(",")[0] + defer.returnValue((iscsi_name, iscsi_portal)) + else: + defer.returnValue((None, None)) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c48f6491..a052aaf95 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -47,7 +47,7 @@ class VolumeOps(object): instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID - vol_rec = Volume.parse_volume_info(device_path, mountpoint) + vol_rec = yield Volume.parse_volume_info(device_path, mountpoint) label = 'SR-%s' % vol_rec['volumeId'] description = 'Disk-for:%s' % instance_name # Create SR -- cgit From 06c52051005f5e43a1f543e2d1c5922aa91c7918 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 6 Dec 2010 19:29:00 +0000 Subject: minor changes to docstrings --- nova/virt/xenapi/novadeps.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/novadeps.py b/nova/virt/xenapi/novadeps.py index 66c8233b8..3680a5dd2 100644 --- a/nova/virt/xenapi/novadeps.py +++ b/nova/virt/xenapi/novadeps.py @@ -60,10 +60,15 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') -#FIXME: replace with proper target discovery -flags.DEFINE_string('target_host', None, 'iSCSI Target Host') -flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') -flags.DEFINE_string('iqn_prefix', 'iqn.2010-10.org.openstack', 'IQN Prefix') +flags.DEFINE_string('target_host', + None, + 'iSCSI Target Host') +flags.DEFINE_string('target_port', + '3260', + 'iSCSI Target Port, 3260 Default') +flags.DEFINE_string('iqn_prefix', + 'iqn.2010-10.org.openstack', + 'IQN Prefix') class Configuration(object): @@ -292,9 +297,9 @@ class Volume(object): """ Gets iscsi name and portal from volume name and host. 
For this method to work the following are needed: - 1) volume_ref['host'] to resolve the public IP address - 2) ietd to listen only to the public network interface - If any of the two are missing, fall back on Flags + 1) volume_ref['host'] must resolve to something rather than loopback + 2) ietd must bind only to the address as resolved above + If any of the two conditions are not met, fall back on Flags. """ volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), volume_id) -- cgit From e9597d1370211de15ca96f1fa52fcbe3c9166a7e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:15:22 +0000 Subject: fixed pylint violations that slipped out from a previous check --- nova/virt/xenapi/vm_utils.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 762cbae83..2ee6737ab 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -29,6 +29,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.virt import images from nova.compute import power_state +from nova.virt.xenapi.volume_utils import StorageError XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -115,11 +116,13 @@ class VMHelper(): @classmethod @utils.deferredToThread - def find_vbd_by_number(self, session, vm_ref, number): + def find_vbd_by_number(cls, session, vm_ref, number): + """ Get the VBD reference from the device number """ return VMHelper.find_vbd_by_number_blocking(session, vm_ref, number) @classmethod - def find_vbd_by_number_blocking(self, session, vm_ref, number): + def find_vbd_by_number_blocking(cls, session, vm_ref, number): + """ Synchronous find_vbd_by_number """ vbds = session.get_xenapi().VM.get_VBDs(vm_ref) if vbds: for vbd in vbds: @@ -127,29 +130,31 @@ class VMHelper(): vbd_rec = session.get_xenapi().VBD.get_record(vbd) if vbd_rec['userdevice'] == str(number): return vbd - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) raise Exception('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks - def unplug_vbd(self, session, vbd_ref): + def unplug_vbd(cls, session, vbd_ref): + """ Unplug VBD from VM """ try: vbd_ref = yield session.call_xenapi('VBD.unplug', vbd_ref) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': - raise Exception('Unable to unplug VBD %s' % vbd_ref) + raise StorageError('Unable to unplug VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks - def destroy_vbd(self, session, vbd_ref): + def destroy_vbd(cls, session, vbd_ref): + """ Destroy VBD from host database """ try: task = yield session.call_xenapi('Async.VBD.destroy', vbd_ref) yield session.wait_for_task(task) - except Exception, exc: + except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('Unable to destroy VBD %s' % vbd_ref) + raise StorageError('Unable to destroy VBD %s' % vbd_ref) @classmethod @defer.inlineCallbacks @@ -244,6 +249,7 @@ class VMHelper(): @classmethod def compile_info(cls, record): + """ Fill record with VM status information """ return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, -- cgit From 88777c09ad909c68da8d433800cae862e9bbff4a Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 14:26:38 +0000 Subject: and yet another pylint fix --- 
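As a side note, the storage clean-up these pylint passes keep touching boils down to one pattern: unplug the VBD, tolerate a device that is already gone, then destroy it. A hedged sketch of that pattern follows; `session` stands in for the XenAPI session wrapper used throughout this series, the exception is assumed to carry a `details` list the way XenAPI.Failure does, and the plain re-raise is a simplification of the StorageError used in the real helpers.

import logging

def detach_vbd(session, vbd_ref):
    """Unplug and destroy a VBD, treating 'already detached' as success."""
    try:
        session.call_xenapi('VBD.unplug', vbd_ref)
    except Exception as exc:
        details = getattr(exc, 'details', [])
        if details and details[0] == 'DEVICE_ALREADY_DETACHED':
            logging.warn('VBD %s was already detached', vbd_ref)
        else:
            raise
    task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
    session.wait_for_task(task)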
nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2ee6737ab..039e72981 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise Exception('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks -- cgit From c0fc8a5e9e72ecb780258d9cf41b32973620eb4c Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 15:35:56 +0000 Subject: small fixes on Exception handling --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 10 +++++++--- nova/virt/xenapi/volumeops.py | 7 ++++--- 3 files changed, 12 insertions(+), 7 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 039e72981..f29803136 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -132,7 +132,7 @@ class VMHelper(): return vbd except XenAPI.Failure, exc: logging.warn(exc) - raise StorageError('VBD not found in instance %s' % vm_ref) + raise StorageError('VBD not found in instance %s' % vm_ref) @classmethod @defer.inlineCallbacks diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 48aff7ef5..debaa6906 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -95,9 +95,13 @@ class VolumeHelper(): @defer.inlineCallbacks def find_sr_from_vbd(cls, session, vbd_ref): """ Find the SR reference from the VBD reference """ - vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) - sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) - defer.returnValue(sr_ref) + try: + vdi_ref = yield session.get_xenapi().VBD.get_VDI(vbd_ref) + sr_ref = yield session.get_xenapi().VDI.get_SR(vdi_ref) + defer.returnValue(sr_ref) + except XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to find SR from VBD %s' % vbd_ref) @classmethod @utils.deferredToThread diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 4055688e3..b9f260756 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -18,6 +18,7 @@ Management class for Storage-related functions (attach, detach, etc). 
""" import logging +import XenAPI from twisted.internet import defer @@ -68,10 +69,10 @@ class VolumeOps(object): vm_ref, vdi_ref, vol_rec['deviceNumber'], False) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - raise StorageError('Unable to use SR %s for instance %s' + raise Exception('Unable to use SR %s for instance %s' % (sr_ref, instance_name)) else: @@ -79,7 +80,7 @@ class VolumeOps(object): task = yield self._session.call_xenapi('Async.VBD.plug', vbd_ref) yield self._session.wait_for_task(task) - except StorageError, exc: + except XenAPI.Failure, exc: logging.warn(exc) yield VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) -- cgit From 699ac0785240307ef2396d688e6c0a2acb446665 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 7 Dec 2010 22:22:48 +0000 Subject: pylint fixes --- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/volume_utils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 35d89d835..0549dc9fb 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -44,7 +44,7 @@ class VMHelper(): """ The class that wraps the helper methods together. """ - def __init__(self, session): + def __init__(self): return @classmethod diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 84eb82f15..051d0fe85 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -1,5 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from orca.scripts import self_voicing # Copyright (c) 2010 Citrix Systems, Inc. # @@ -46,7 +45,7 @@ class VolumeHelper(): """ The class that wraps the helper methods together. 
""" - def __init__(self, session): + def __init__(self): return @classmethod -- cgit From 63006a18701ff185e6837aa2b88f001052643460 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 8 Dec 2010 18:49:28 +0000 Subject: typo fix --- nova/virt/xenapi/volume_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 051d0fe85..d247066aa 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -78,14 +78,14 @@ class VolumeHelper(): if 'chapuser' in info and 'chappassword' in info: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'], + 'targetIQN': info['targetIQN'], 'chapuser': info['chapuser'], 'chappassword': info['chappassword'] } else: record = {'target': info['targetHost'], 'port': info['targetPort'], - 'targetIQN': info['targeIQN'] + 'targetIQN': info['targetIQN'] } try: sr_ref = session.get_xenapi().SR.create( @@ -211,7 +211,7 @@ class VolumeHelper(): volume_info['volumeId'] = volume_id volume_info['targetHost'] = target_host volume_info['targetPort'] = target_port - volume_info['targeIQN'] = target_iqn + volume_info['targetIQN'] = target_iqn defer.returnValue(volume_info) @classmethod -- cgit From 3c85f1b7ed593a2d4d126a34241f217da5cf7ce6 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Wed, 8 Dec 2010 17:18:27 -0800 Subject: intermediate commit to checkpoint progress all relevant tests are passing except volume, next step is volume manager fixery --- nova/virt/images.py | 9 +++++---- nova/virt/libvirt_conn.py | 28 +++++++++++++--------------- nova/virt/xenapi/vm_utils.py | 28 +++++++++++----------------- nova/virt/xenapi_conn.py | 1 - 4 files changed, 29 insertions(+), 37 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/images.py b/nova/virt/images.py index 981aa5cf3..4d7c65f12 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -26,7 +26,7 @@ import time import urlparse from nova import flags -from nova import process +from nova import utils from nova.auth import manager from nova.auth import signer from nova.objectstore import image @@ -63,15 +63,16 @@ def _fetch_s3_image(image, path, user, project): cmd = ['/usr/bin/curl', '--fail', '--silent', url] for (k, v) in headers.iteritems(): - cmd += ['-H', '%s: %s' % (k, v)] + cmd += ['-H', '"%s: %s"' % (k, v)] cmd += ['-o', path] - return process.SharedPool().execute(executable=cmd[0], args=cmd[1:]) + cmd_out = ' '.join(cmd) + return utils.execute(cmd_out) def _fetch_local_image(image, path, user, project): source = _image_path('%s/image' % image) - return process.simple_execute('cp %s %s' % (source, path)) + return utils.execute('cp %s %s' % (source, path)) def _image_path(path): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index c09a7c01d..715e7234c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -54,7 +54,6 @@ from nova import context from nova import db from nova import exception from nova import flags -from nova import process from nova import utils #from nova.api import context from nova.auth import manager @@ -366,8 +365,8 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - r = process.simple_execute("sudo dd if=%s iflag=nonblock" % - virsh_output, check_exit_code=False) + r = utils.execute("sudo dd if=%s iflag=nonblock" % + virsh_output, check_exit_code=False) return r[0] else: return '' @@ -389,13 
+388,13 @@ class LibvirtConnection(object): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - process.simple_execute('sudo chown %d %s' % (os.getuid(), - console_log)) + utils.execute('sudo chown %d %s' % (os.getuid(), + console_log)) if FLAGS.libvirt_type == 'xen': # Xen is special - virsh_output = process.simple_execute("virsh ttyconsole %s" % - instance['name']) + virsh_output = utils.execute("virsh ttyconsole %s" % + instance['name']) data = self._flush_xen_console(virsh_output) fpath = self._append_to_file(data, console_log) else: @@ -411,8 +410,8 @@ class LibvirtConnection(object): prefix + fname) # ensure directories exist and are writable - process.simple_execute('mkdir -p %s' % basepath(prefix='')) - process.simple_execute('chmod 0777 %s' % basepath(prefix='')) + utils.execute('mkdir -p %s' % basepath(prefix='')) + utils.execute('chmod 0777 %s' % basepath(prefix='')) # TODO(termie): these are blocking calls, it would be great # if they weren't. @@ -443,9 +442,9 @@ class LibvirtConnection(object): project) def execute(cmd, process_input=None, check_exit_code=True): - return process.simple_execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None @@ -471,7 +470,7 @@ class LibvirtConnection(object): execute=execute) if os.path.exists(basepath('disk')): - process.simple_execute('rm -f %s' % basepath('disk')) + utils.execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] @@ -485,8 +484,7 @@ class LibvirtConnection(object): local_bytes, resize, execute=execute) if FLAGS.libvirt_type == 'uml': - process.simple_execute('sudo chown root %s' % - basepath('disk')) + utils.execute('sudo chown root %s' % basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 99d484ca2..b72b8e13d 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,14 +21,13 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import logging -from twisted.internet import defer - from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -36,6 +35,7 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} + XenAPI = None @@ -49,7 +49,6 @@ class VMHelper(): XenAPI = __import__('XenAPI') @classmethod - @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new VM reference.""" @@ -87,12 +86,11 @@ class VMHelper(): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = yield session.call_xenapi('VM.create', rec) + vm_ref = session.call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) + return vm_ref @classmethod - @defer.inlineCallbacks def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. 
Returns a Deferred that gives the new VBD reference.""" @@ -111,13 +109,12 @@ class VMHelper(): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec) + vbd_ref = session.call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - defer.returnValue(vbd_ref) + return vbd_ref @classmethod - @defer.inlineCallbacks def create_vif(cls, session, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -133,13 +130,12 @@ class VMHelper(): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref, network_ref) - vif_ref = yield session.call_xenapi('VIF.create', vif_rec) + vif_ref = session.call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - defer.returnValue(vif_ref) + return vif_ref @classmethod - @defer.inlineCallbacks def fetch_image(cls, session, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -156,12 +152,11 @@ class VMHelper(): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - task = yield session.async_call_plugin('objectstore', fn, args) - uuid = yield session.wait_for_task(task) - defer.returnValue(uuid) + task = session.async_call_plugin('objectstore', fn, args) + uuid = session.wait_for_task(task) + return uuid @classmethod - @utils.deferredToThread def lookup(cls, session, i): """ Look the instance i up, and returns it if available """ return VMHelper.lookup_blocking(session, i) @@ -179,7 +174,6 @@ class VMHelper(): return vms[0] @classmethod - @utils.deferredToThread def lookup_vm_vdis(cls, session, vm): """ Look for the VDIs that are attached to the VM """ return VMHelper.lookup_vm_vdis_blocking(session, vm) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index dacf9fe2b..96d211cc0 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -167,7 +167,6 @@ class XenAPISession(object): self.get_xenapi_host(), plugin, fn, args) def wait_for_task(self, task): ->>>>>>> MERGE-SOURCE """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" -- cgit From 5f72a004dee0cb8de3f2daee1976fa978f6e51f3 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 9 Dec 2010 16:41:35 +0000 Subject: pause from compute.manager <-> xenapi --- nova/virt/xenapi/vmops.py | 20 ++++++++++++++++++++ nova/virt/xenapi_conn.py | 8 ++++++++ 2 files changed, 28 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d36cdaea5..353e83873 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -120,6 +120,26 @@ class VMOps(object): except XenAPI.Failure, exc: logging.warn(exc) + @defer.inlineCallbacks + def pause(self, instance): + """ Pause VM instance """ + instance_name = instance.name + vm = yield VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = yield self._session.call_xenapi('Async.VM.pause', vm) + yield self._session.wait_for_task(task) + + @defer.inlineCallbacks + def unpause(self, instance): + """ Unpause VM instance """ + instance_name = instance.name + vm = yield VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = yield self._session.call_xenapi('Async.VM.unpause', vm) + yield self._session.wait_for_task(task) + def get_info(self, instance_id): """ Return data about VM instance """ vm = VMHelper.lookup_blocking(self._session, instance_id) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 26b30bf92..df405e75f 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -122,6 +122,14 @@ class XenAPIConnection(object): """ Destroy VM instance """ self._vmops.destroy(instance) + def pause(self, instance): + """ Pause VM instance """ + self._vmops.pause(instance) + + def unpause(self, instance): + """ Unpause paused VM instance """ + self._vmops.unpause(instance) + def get_info(self, instance_id): """ Return data about VM instance """ return self._vmops.get_info(instance_id) -- cgit From af5c175dbc77048fb74311bf92569866676eee9c Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Thu, 9 Dec 2010 16:18:52 -0800 Subject: removed a few more references to twisted --- nova/virt/images.py | 2 +- nova/virt/xenapi/network_utils.py | 9 ++---- nova/virt/xenapi/vmops.py | 61 ++++++++++++++++++--------------------- 3 files changed, 32 insertions(+), 40 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/images.py b/nova/virt/images.py index 4d7c65f12..1c9b2e093 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -50,7 +50,7 @@ def _fetch_s3_image(image, path, user, project): # This should probably move somewhere else, like e.g. a download_as # method on User objects and at the same time get rewritten to use - # twisted web client. + # a web client. headers = {} headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..d8632f393 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -20,8 +20,6 @@ records and their attributes like bridges, PIFs, QoS, as well as their lookup functions. 
""" -from twisted.internet import defer - class NetworkHelper(): """ @@ -31,14 +29,13 @@ class NetworkHelper(): return @classmethod - @defer.inlineCallbacks def find_network_with_bridge(cls, session, bridge): """ Return the network on which the bridge is attached, if found """ expr = 'field "bridge" = "%s"' % bridge - networks = yield session.call_xenapi('network.get_all_records_where', - expr) + networks = session.call_xenapi('network.get_all_records_where', + expr) if len(networks) == 1: - defer.returnValue(networks.keys()[0]) + return networks.keys()[0] elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d36cdaea5..0223e512a 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -20,8 +20,6 @@ Management class for VM-related functions (spawn, reboot, etc). import logging -from twisted.internet import defer - from nova import db from nova import context from nova.auth.manager import AuthManager @@ -46,10 +44,9 @@ class VMOps(object): return [self._session.get_xenapi().VM.get_name_label(vm) \ for vm in self._session.get_xenapi().VM.get_all()] - @defer.inlineCallbacks def spawn(self, instance): """ Create VM instance """ - vm = yield VMHelper.lookup(self._session, instance.name) + vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % instance.name) @@ -57,66 +54,64 @@ class VMOps(object): bridge = db.project_get_network(context.get_admin_context(), instance.project_id).bridge network_ref = \ - yield NetworkHelper.find_network_with_bridge(self._session, bridge) + NetworkHelper.find_network_with_bridge(self._session, bridge) user = AuthManager().get_user(instance.user_id) project = AuthManager().get_project(instance.project_id) - vdi_uuid = yield VMHelper.fetch_image(self._session, - instance.image_id, user, project, True) - kernel = yield VMHelper.fetch_image(self._session, - instance.kernel_id, user, project, False) - ramdisk = yield VMHelper.fetch_image(self._session, - instance.ramdisk_id, user, project, False) - vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - vm_ref = yield VMHelper.create_vm(self._session, - instance, kernel, ramdisk) - yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) + vdi_uuid = VMHelper.fetch_image( + self._session, instance.image_id, user, project, True) + kernel = VMHelper.fetch_image( + self._session, instance.kernel_id, user, project, False) + ramdisk = VMHelper.fetch_image( + self._session, instance.ramdisk_id, user, project, False) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + vm_ref = VMHelper.create_vm( + self._session, instance, kernel, ramdisk) + VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) if network_ref: - yield VMHelper.create_vif(self._session, vm_ref, - network_ref, instance.mac_address) + VMHelper.create_vif(self._session, vm_ref, + network_ref, instance.mac_address) logging.debug('Starting VM %s...', vm_ref) - yield self._session.call_xenapi('VM.start', vm_ref, False, False) + self._session.call_xenapi('VM.start', vm_ref, False, False) logging.info('Spawning VM %s created %s.', instance.name, vm_ref) - @defer.inlineCallbacks def reboot(self, instance): """ Reboot VM instance """ instance_name = instance.name - vm = yield VMHelper.lookup(self._session, instance_name) + vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance 
not present %s' % instance_name) - task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VM.clean_reboot', vm) + self._session.wait_for_task(task) - @defer.inlineCallbacks def destroy(self, instance): """ Destroy VM instance """ - vm = yield VMHelper.lookup(self._session, instance.name) + vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. This lets us clean up instances # that have already disappeared from the underlying platform. - defer.returnValue(None) + return # Get the VDIs related to the VM - vdis = yield VMHelper.lookup_vm_vdis(self._session, vm) + vdis = VMHelper.lookup_vm_vdis(self._session, vm) try: - task = yield self._session.call_xenapi('Async.VM.hard_shutdown', + task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - yield self._session.wait_for_task(task) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up if vdis: for vdi in vdis: try: - task = yield self._session.call_xenapi('Async.VDI.destroy', - vdi) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VDI.destroy', + vdi) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) try: - task = yield self._session.call_xenapi('Async.VM.destroy', vm) - yield self._session.wait_for_task(task) + task = self._session.call_xenapi('Async.VM.destroy', vm) + self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) -- cgit From f7862f6d212d52e09d2a3a076762c936618cf061 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 10 Dec 2010 17:55:21 +0000 Subject: added pause and unpause to fake connection --- nova/virt/fake.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index f855523d3..4526f0042 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -133,6 +133,18 @@ class FakeConnection(object): """ return defer.succeed(None) + def pause(self, instance): + """ + Pause the specified instance. + """ + return defer.succeed(None) + + def unpause(self, instance): + """ + Unpause the specified instance. + """ + return defer.succeed(None) + def destroy(self, instance): """ Destroy (shutdown and delete) the specified instance. -- cgit From 2a5ad56319dfdf75bf2eab1337032f035822f272 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Fri, 10 Dec 2010 18:37:17 +0000 Subject: There is always the odd change that one forgets! 
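(In passing: the pause/unpause plumbing added a few patches back reduces to one idiom, look up the VM, fire the asynchronous XenAPI call, and block on the task. A minimal, hypothetical condensation of that idiom; the helper name is invented, and `session` is assumed to expose call_xenapi and wait_for_task like the XenAPISession wrapper above.)

def _async_vm_op(session, vm_ref, operation):
    """Run 'Async.VM.<operation>' (e.g. 'pause', 'unpause',
    'clean_reboot') against vm_ref and wait for the task to finish."""
    task = session.call_xenapi('Async.VM.%s' % operation, vm_ref)
    return session.wait_for_task(task)

# e.g. _async_vm_op(self._session, vm, 'pause') is the core of
# VMOps.pause() shown earlier, minus the lookup and error handling.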
--- nova/virt/xenapi_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index b82862764..4ace6da14 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -65,7 +65,7 @@ from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS flags.DEFINE_boolean('xenapi_use_fake_session', - True, + False, 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, -- cgit From c835c441981a17764931390bc1ace6121ab100a4 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Fri, 10 Dec 2010 11:53:17 -0800 Subject: port new patches --- nova/virt/xenapi/vmops.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 13871b479..b6b92b926 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -126,14 +126,13 @@ class VMOps(object): rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) - @defer.inlineCallbacks def get_diagnostics(self, instance_id): """Return data about VM diagnostics""" - vm = yield VMHelper.lookup(self._session, instance_id) + vm = VMHelper.lookup(self._session, instance_id) if vm is None: raise Exception("instance not present %s" % instance_id) - rec = yield self._session.get_xenapi().VM.get_record(vm) - defer.returnValue(VMHelper.compile_diagnostics(self._session, rec)) + rec = self._session.get_xenapi().VM.get_record(vm) + return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): """ Return snapshot of console """ -- cgit From c00d99102c826f6a501ff7a530291dc8d7680df7 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Sat, 11 Dec 2010 15:42:05 -0500 Subject: Final round of marking translation strings --- nova/virt/connection.py | 2 +- nova/virt/fake.py | 3 ++- nova/virt/libvirt_conn.py | 41 ++++++++++++++++++++++------------------- nova/virt/xenapi_conn.py | 16 ++++++++-------- 4 files changed, 33 insertions(+), 29 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/connection.py b/nova/virt/connection.py index c40bb4bb4..61e99944e 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -66,6 +66,6 @@ def get_connection(read_only=False): raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error('Failed to open connection to the hypervisor') + logging.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/fake.py b/nova/virt/fake.py index f855523d3..26b01af91 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -167,7 +167,8 @@ class FakeConnection(object): knowledge of the instance """ if instance_name not in self.instances: - raise exception.NotFound("Instance %s Not Found" % instance_name) + raise exception.NotFound(_("Instance %s Not Found") + % instance_name) i = self.instances[instance_name] return {'state': i._state, 'max_mem': 0, diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 18085089f..bc435f4b5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -132,7 +132,7 @@ class LibvirtConnection(object): @property def _conn(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) + logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) 
return self._wrapped_conn @@ -144,7 +144,7 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug('Connection to libvirt broke') + logging.debug(_('Connection to libvirt broke')) return False raise @@ -214,7 +214,7 @@ class LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info('instance %s: deleting instance files %s', + logging.info(_('instance %s: deleting instance files %s'), instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -259,7 +259,7 @@ class LibvirtConnection(object): mount_device = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) if not xml: - raise exception.NotFound("No disk at %s" % mount_device) + raise exception.NotFound(_("No disk at %s") % mount_device) virt_dom.detachDevice(xml) yield @@ -279,11 +279,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rebooted', instance['name']) + logging.debug(_('instance %s: rebooted'), instance['name']) timer.stop() d.callback(None) except Exception, exn: - logging.error('_wait_for_reboot failed: %s', exn) + logging.error(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -314,11 +314,11 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rescued', instance['name']) + logging.debug(_('instance %s: rescued'), instance['name']) timer.stop() d.callback(None) except Exception, exn: - logging.error('_wait_for_rescue failed: %s', exn) + logging.error(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -348,7 +348,7 @@ class LibvirtConnection(object): setup_nwfilters_for_instance(instance) yield self._create_image(instance, xml) yield self._conn.createXML(xml, 0) - logging.debug("instance %s: is running", instance['name']) + logging.debug(_("instance %s: is running"), instance['name']) local_d = defer.Deferred() timer = task.LoopingCall(f=None) @@ -359,11 +359,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: booted', instance['name']) + logging.debug(_('instance %s: booted'), instance['name']) timer.stop() local_d.callback(None) except: - logging.exception('instance %s: failed to boot', + logging.exception(_('instance %s: failed to boot'), instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], @@ -379,7 +379,7 @@ class LibvirtConnection(object): virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info('cool, it\'s a device') + logging.info(_('cool, it\'s a device')) d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) d.addCallback(lambda r: r[0]) @@ -388,7 +388,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info('data: %r, fpath: %r' % (data, fpath)) + logging.info(_('data: %r, fpath: %r') % (data, fpath)) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -431,7 +431,7 @@ 
class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. - logging.info('instance %s: Creating image', inst['name']) + logging.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -477,10 +477,10 @@ class LibvirtConnection(object): 'dns': network_ref['dns']} if key or net: if key: - logging.info('instance %s: injecting key into image %s', + logging.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info('instance %s: injecting net into image %s', + logging.info(_('instance %s: injecting net into image %s'), inst['name'], inst.image_id) yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute) @@ -504,7 +504,8 @@ class LibvirtConnection(object): def to_xml(self, instance, rescue=False): # TODO(termie): cache? - logging.debug('instance %s: starting toXML method', instance['name']) + logging.debug(_('instance %s: starting toXML method'), + instance['name']) network = db.project_get_network(context.get_admin_context(), instance['project_id']) # FIXME(vish): stick this in db @@ -528,7 +529,8 @@ class LibvirtConnection(object): libvirt_xml = self.rescue_xml % xml_info else: libvirt_xml = self.libvirt_xml % xml_info - logging.debug('instance %s: finished toXML method', instance['name']) + logging.debug(_('instance %s: finished toXML method'), + instance['name']) return libvirt_xml @@ -536,7 +538,8 @@ class LibvirtConnection(object): try: virt_dom = self._conn.lookupByName(instance_name) except: - raise exception.NotFound("Instance %s not found" % instance_name) + raise exception.NotFound(_("Instance %s not found") + % instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 2153810c8..c8c451dfe 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -92,10 +92,10 @@ def get_connection(_): username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password if not url or password is None: - raise Exception('Must specify xenapi_connection_url, ' - 'xenapi_connection_username (optionally), and ' - 'xenapi_connection_password to use ' - 'connection_type=xenapi') + raise Exception(_('Must specify xenapi_connection_url, ' + 'xenapi_connection_username (optionally), and ' + 'xenapi_connection_password to use ' + 'connection_type=xenapi')) return XenAPIConnection(url, username, password) @@ -195,11 +195,11 @@ class XenAPISession(object): self._poll_task, task, deferred) elif status == 'success': result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) + logging.info(_('Task %s status: success. %s'), task, result) deferred.callback(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, + logging.warn(_('Task %s status: %s. 
%s'), task, status, error_info) deferred.errback(XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) @@ -213,7 +213,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): try: return func(*args, **kwargs) except XenAPI.Failure, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -226,7 +226,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) raise -- cgit From 669d5f5612840c9ed6449d91ee5aae97842cac72 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:22:56 +0000 Subject: second round for unit testing framework --- nova/virt/xenapi/__init__.py | 5 +++ nova/virt/xenapi/fake.py | 75 ++++++++++++++++++++++++++++++++++++++++++-- nova/virt/xenapi/vmops.py | 3 +- nova/virt/xenapi_conn.py | 2 +- 4 files changed, 80 insertions(+), 5 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 3d598c463..d9abe54c5 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -13,3 +13,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +""" +:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI +================================================================== +""" diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 75ff587e1..3a01f9c3d 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,4 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +from twisted.web.domhelpers import _get +from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc. # @@ -33,15 +35,15 @@ class Failure(Exception): class FakeXenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self): - pass + self.fail_next_call = False def get_xenapi(self): """ Return the xenapi object """ - raise NotImplementedError() + return self def get_xenapi_host(self): """ Return the xenapi host """ - raise NotImplementedError() + return 'FAKE_XENAPI_HOST' def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread. Returns @@ -57,3 +59,70 @@ class FakeXenAPISession(object): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" raise NotImplementedError() + + def __getattr__(self, name): + return FakeXenAPIObject(name, self) + + +class FakeXenAPIObject(object): + def __init__(self, name, session): + self.name = name + self.session = session + self.FAKE_REF = 'FAKE_REFERENCE_%s' % name + + def get_by_name_label(self, label): + if label is None: + return '' # 'No object found' + else: + return 'FAKE_OBJECT_%s_%s' % (self.name, label) + + def getter(self, *args): + self._check_fail() + return self.FAKE_REF + + def ref_list(self, *args): + self._check_fail() + return [FakeXenAPIRecord()] + + def __getattr__(self, name): + if name == 'create': + return self._create + elif name == 'get_record': + return self._record + elif name == 'introduce': + return self._introduce + elif name.startswith('get_'): + getter = 'get_%s' % self.name + if name == getter: + return self.getter + else: + child = name[name.find('_') + 1:] + if child.endswith('s'): + return FakeXenAPIObject(child[:-1], self.session).ref_list + else: + return FakeXenAPIObject(child, self.session).getter + + def _create(self, *args): + self._check_fail() + return self.FAKE_REF + + def _record(self, *args): + self._check_fail() + return FakeXenAPIRecord() + + def _introduce(self, *args): + self._check_fail() + pass + + def _check_fail(self): + if self.session.fail_next_call: + self.session.fail_next_call = False # Reset! + raise Failure('Unable to create %s' % self.name) + + +class FakeXenAPIRecord(dict): + def __init__(self): + pass + + def __getitem__(self, attr): + return '' diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a5d923a3b..9a8db0ad4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,6 +24,7 @@ from twisted.internet import defer from nova import db from nova import context +from nova import exception from nova.auth.manager import AuthManager from nova.virt.xenapi.network_utils import NetworkHelper @@ -123,7 +124,7 @@ class VMOps(object): """ Return data about VM instance """ vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: - raise Exception('instance not present %s' % instance_id) + raise exception.NotFound('Instance not found %s' % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 4ace6da14..d8d21e24d 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -91,7 +91,7 @@ flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') flags.DEFINE_string('iqn_prefix', - 'iqn.2010-10.org.openstack', + 'iqn.2010-12.org.openstack', 'IQN Prefix') -- cgit From fe667352c3e25c744a989ca45f4f9ed472778ae3 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 18:43:24 +0000 Subject: removing imports that should have not been there --- nova/virt/xenapi/fake.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 3a01f9c3d..2fed28609 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -1,6 +1,4 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -from twisted.web.domhelpers import _get -from aptdaemon.defer import defer # Copyright (c) 2010 Citrix Systems, Inc. 
# @@ -89,8 +87,10 @@ class FakeXenAPIObject(object): return self._create elif name == 'get_record': return self._record - elif name == 'introduce': - return self._introduce + elif name == 'introduce' or\ + name == 'forget' or\ + name == 'unplug': + return self._fake_action elif name.startswith('get_'): getter = 'get_%s' % self.name if name == getter: @@ -110,7 +110,7 @@ class FakeXenAPIObject(object): self._check_fail() return FakeXenAPIRecord() - def _introduce(self, *args): + def _fake_action(self, *args): self._check_fail() pass -- cgit From e30801445f8b543d78494ca63be60f85b94d3a53 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Mon, 13 Dec 2010 20:31:33 +0000 Subject: moving xenapi unittests changes into another branch --- nova/virt/xenapi/fake.py | 128 ----------------------------------------------- 1 file changed, 128 deletions(-) delete mode 100644 nova/virt/xenapi/fake.py (limited to 'nova/virt') diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py deleted file mode 100644 index 2fed28609..000000000 --- a/nova/virt/xenapi/fake.py +++ /dev/null @@ -1,128 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A fake XenAPI SDK. - -Allows for xenapi helper classes testing. -""" - - -class Failure(Exception): - def __init__(self, message=None): - super(Failure, self).__init__(message) - self.details = [] - - def __str__(self): - return 'Fake XenAPI Exception' - - -class FakeXenAPISession(object): - """ The session to invoke XenAPI SDK calls """ - def __init__(self): - self.fail_next_call = False - - def get_xenapi(self): - """ Return the xenapi object """ - return self - - def get_xenapi_host(self): - """ Return the xenapi host """ - return 'FAKE_XENAPI_HOST' - - def call_xenapi(self, method, *args): - """Call the specified XenAPI method on a background thread. Returns - a Deferred for the result.""" - raise NotImplementedError() - - def async_call_plugin(self, plugin, fn, args): - """Call Async.host.call_plugin on a background thread. Returns a - Deferred with the task reference.""" - raise NotImplementedError() - - def wait_for_task(self, task): - """Return a Deferred that will give the result of the given task. 
- The task is polled until it completes.""" - raise NotImplementedError() - - def __getattr__(self, name): - return FakeXenAPIObject(name, self) - - -class FakeXenAPIObject(object): - def __init__(self, name, session): - self.name = name - self.session = session - self.FAKE_REF = 'FAKE_REFERENCE_%s' % name - - def get_by_name_label(self, label): - if label is None: - return '' # 'No object found' - else: - return 'FAKE_OBJECT_%s_%s' % (self.name, label) - - def getter(self, *args): - self._check_fail() - return self.FAKE_REF - - def ref_list(self, *args): - self._check_fail() - return [FakeXenAPIRecord()] - - def __getattr__(self, name): - if name == 'create': - return self._create - elif name == 'get_record': - return self._record - elif name == 'introduce' or\ - name == 'forget' or\ - name == 'unplug': - return self._fake_action - elif name.startswith('get_'): - getter = 'get_%s' % self.name - if name == getter: - return self.getter - else: - child = name[name.find('_') + 1:] - if child.endswith('s'): - return FakeXenAPIObject(child[:-1], self.session).ref_list - else: - return FakeXenAPIObject(child, self.session).getter - - def _create(self, *args): - self._check_fail() - return self.FAKE_REF - - def _record(self, *args): - self._check_fail() - return FakeXenAPIRecord() - - def _fake_action(self, *args): - self._check_fail() - pass - - def _check_fail(self): - if self.session.fail_next_call: - self.session.fail_next_call = False # Reset! - raise Failure('Unable to create %s' % self.name) - - -class FakeXenAPIRecord(dict): - def __init__(self): - pass - - def __getitem__(self, attr): - return '' -- cgit From 1395d31badc43bdce036e8da3927af22a22ca91e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 13 Dec 2010 18:25:17 -0800 Subject: Fixed power state update with Twisted callback --- nova/virt/xenapi/vmops.py | 12 ++++++++---- nova/virt/xenapi_conn.py | 8 ++++---- 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 353e83873..03ee3dd58 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -121,24 +121,28 @@ class VMOps(object): logging.warn(exc) @defer.inlineCallbacks - def pause(self, instance): + def pause(self, instance, callback): """ Pause VM instance """ instance_name = instance.name vm = yield VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) task = yield self._session.call_xenapi('Async.VM.pause', vm) - yield self._session.wait_for_task(task) + deferred = self._session.wait_for_task(task) + deferred.addCallback(callback) + yield deferred @defer.inlineCallbacks - def unpause(self, instance): + def unpause(self, instance, callback): """ Unpause VM instance """ instance_name = instance.name vm = yield VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) task = yield self._session.call_xenapi('Async.VM.unpause', vm) - yield self._session.wait_for_task(task) + deferred = self._session.wait_for_task(task) + deferred.addCallback(callback) + yield deferred def get_info(self, instance_id): """ Return data about VM instance """ diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index df405e75f..bcfb48323 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -122,13 +122,13 @@ class XenAPIConnection(object): """ Destroy VM instance """ self._vmops.destroy(instance) - def pause(self, instance): + def 
pause(self, instance, callback): """ Pause VM instance """ - self._vmops.pause(instance) + self._vmops.pause(instance, callback) - def unpause(self, instance): + def unpause(self, instance, callback): """ Unpause paused VM instance """ - self._vmops.unpause(instance) + self._vmops.unpause(instance, callback) def get_info(self, instance_id): """ Return data about VM instance """ -- cgit From 99ba9bc7c4cd7bdb085e76a8f926ade27d558a84 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 14 Dec 2010 02:43:15 -0400 Subject: added callback param to fake_conn --- nova/virt/fake.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 4526f0042..c56907175 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -133,13 +133,13 @@ class FakeConnection(object): """ return defer.succeed(None) - def pause(self, instance): + def pause(self, instance, callback): """ Pause the specified instance. """ return defer.succeed(None) - def unpause(self, instance): + def unpause(self, instance, callback): """ Unpause the specified instance. """ -- cgit From 6e37cf42d758b5040442d9c296b21955d10a7327 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Tue, 14 Dec 2010 11:48:29 +0000 Subject: final cleanup, after moving unittest work into another branch --- nova/virt/xenapi/__init__.py | 30 ++++++++++++++++++ nova/virt/xenapi/fake.py | 66 +++++++++++++++++++++++++++++++++++++++ nova/virt/xenapi/network_utils.py | 3 +- nova/virt/xenapi/vm_utils.py | 19 ++--------- nova/virt/xenapi/vmops.py | 6 ++-- nova/virt/xenapi/volume_utils.py | 20 ++---------- nova/virt/xenapi/volumeops.py | 8 +++-- nova/virt/xenapi_conn.py | 10 ++++-- 8 files changed, 118 insertions(+), 44 deletions(-) create mode 100644 nova/virt/xenapi/fake.py (limited to 'nova/virt') diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index d9abe54c5..1a2903b98 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -18,3 +18,33 @@ :mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI ================================================================== """ + + +def load_sdk(flags): + """ + This method is used for loading the XenAPI SDK (fake or real) + """ + xenapi_module = \ + flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' + from_list = \ + flags.xenapi_use_fake_session and ['fake'] or [] + + return __import__(xenapi_module, globals(), locals(), from_list, -1) + + +class HelperBase(): + """ + The class that wraps the helper methods together. + """ + XenAPI = None + + def __init__(self): + return + + @classmethod + def late_import(cls, FLAGS): + """ + Load XenAPI module in for helper class + """ + if cls.XenAPI is None: + cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py new file mode 100644 index 000000000..8d6a39a87 --- /dev/null +++ b/nova/virt/xenapi/fake.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A fake XenAPI SDK. + +Allows for xenapi helper classes testing. +""" + + +class Failure(Exception): + def __init__(self, message=None): + super(Failure, self).__init__(message) + self.details = [] + + def __str__(self): + return 'Fake XenAPI Exception' + + +class FakeSession(object): + """ + The session to invoke XenAPI SDK calls. + FIXME(armando): this is a placeholder + for the xenapi unittests branch. + """ + def __init__(self, url): + pass + + def get_xenapi(self): + """ Return the xenapi object """ + raise NotImplementedError() + + def get_xenapi_host(self): + """ Return the xenapi host """ + raise NotImplementedError() + + def call_xenapi(self, method, *args): + """Call the specified XenAPI method on a background thread. Returns + a Deferred for the result.""" + raise NotImplementedError() + + def async_call_plugin(self, plugin, fn, args): + """Call Async.host.call_plugin on a background thread. Returns a + Deferred with the task reference.""" + raise NotImplementedError() + + def wait_for_task(self, task): + """Return a Deferred that will give the result of the given task. + The task is polled until it completes.""" + raise NotImplementedError() + + def __getattr__(self, name): + raise NotImplementedError() diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..cb745cdaf 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -21,9 +21,10 @@ their lookup functions. """ from twisted.internet import defer +from nova.virt.xenapi import HelperBase -class NetworkHelper(): +class NetworkHelper(HelperBase): """ The class that wraps the helper methods together. """ diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 89831fe5d..2b0691c01 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -32,6 +32,7 @@ from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from nova.virt.xenapi import HelperBase from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS @@ -44,29 +45,13 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} -class VMHelper(): +class VMHelper(HelperBase): """ The class that wraps the helper methods together. 
""" - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 9a8db0ad4..c79245972 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -24,9 +24,11 @@ from twisted.internet import defer from nova import db from nova import context +from nova import flags from nova import exception from nova.auth.manager import AuthManager +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -36,10 +38,10 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper class - VMHelper.late_import() + VMHelper.late_import(flags.FLAGS) def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 05143ed91..4482e465c 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -30,7 +30,7 @@ from nova import context from nova import flags from nova import process from nova import utils - +from nova.virt.xenapi import HelperBase FLAGS = flags.FLAGS @@ -41,29 +41,13 @@ class StorageError(Exception): super(StorageError, self).__init__(message) -class VolumeHelper(): +class VolumeHelper(HelperBase): """ The class that wraps the helper methods together. 
""" - - XenAPI = None - def __init__(self): return - @classmethod - def late_import(cls): - """ - Load XenAPI module in for helper class - """ - xenapi_module = \ - FLAGS.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - FLAGS.xenapi_use_fake_session and ['fake'] or [] - if cls.XenAPI is None: - cls.XenAPI = __import__(xenapi_module, - globals(), locals(), from_list, -1) - @classmethod @utils.deferredToThread def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index b1afdc811..1b337a6ed 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -21,6 +21,8 @@ import logging from twisted.internet import defer +from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -31,11 +33,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = __import__('XenAPI') + self.XenAPI = load_sdk(flags.FLAGS) self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import() - VMHelper.late_import() + VolumeHelper.late_import(flags.FLAGS) + VMHelper.late_import(flags.FLAGS) @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index d8d21e24d..649d5dd04 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -59,6 +59,7 @@ from twisted.internet import reactor from nova import utils from nova import flags +from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps @@ -91,7 +92,7 @@ flags.DEFINE_string('target_port', '3260', 'iSCSI Target Port, 3260 Default') flags.DEFINE_string('iqn_prefix', - 'iqn.2010-12.org.openstack', + 'iqn.2010-10.org.openstack', 'IQN Prefix') @@ -156,8 +157,11 @@ class XenAPISession(object): def __init__(self, url, user, pw): # This is loaded late so that there's no need to install this # library when not using XenAPI. - self.XenAPI = __import__('XenAPI') - self._session = self.XenAPI.Session(url) + self.XenAPI = load_sdk(FLAGS) + if FLAGS.xenapi_use_fake_session: + self._session = self.XenAPI.FakeSession(url) + else: + self._session = self.XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): -- cgit From bfe019e0de486eea09e4702262cd228791a4694c Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 14 Dec 2010 15:33:18 +0100 Subject: Now that we have a templating engine, let's use it. Consolidate all the libvirt templates into one, extending the unit tests to make sure I didn't mess up. 
--- nova/virt/libvirt.qemu.xml.template | 37 --------------- nova/virt/libvirt.rescue.qemu.xml.template | 37 --------------- nova/virt/libvirt.rescue.uml.xml.template | 26 ---------- nova/virt/libvirt.rescue.xen.xml.template | 34 ------------- nova/virt/libvirt.uml.xml.template | 26 ---------- nova/virt/libvirt.xen.xml.template | 35 -------------- nova/virt/libvirt.xml.template | 76 ++++++++++++++++++++++++++++++ nova/virt/libvirt_conn.py | 60 +++++------------------ 8 files changed, 89 insertions(+), 242 deletions(-) delete mode 100644 nova/virt/libvirt.qemu.xml.template delete mode 100644 nova/virt/libvirt.rescue.qemu.xml.template delete mode 100644 nova/virt/libvirt.rescue.uml.xml.template delete mode 100644 nova/virt/libvirt.rescue.xen.xml.template delete mode 100644 nova/virt/libvirt.uml.xml.template delete mode 100644 nova/virt/libvirt.xen.xml.template create mode 100644 nova/virt/libvirt.xml.template (limited to 'nova/virt') diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template deleted file mode 100644 index 739eceaaa..000000000 --- a/nova/virt/libvirt.qemu.xml.template +++ /dev/null @@ -1,37 +0,0 @@ - - ${name} - - hvm -#if $getVar('kernel', None) - ${kernel} - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - root=/dev/vda1 console=ttyS0 -#end if - - - - - ${memory_kb} - ${vcpus} - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template deleted file mode 100644 index c0ffbdcee..000000000 --- a/nova/virt/libvirt.rescue.qemu.xml.template +++ /dev/null @@ -1,37 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/rescue-kernel - %(basepath)s/rescue-ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template deleted file mode 100644 index 836f47532..000000000 --- a/nova/virt/libvirt.rescue.uml.xml.template +++ /dev/null @@ -1,26 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template deleted file mode 100644 index 3b8d27237..000000000 --- a/nova/virt/libvirt.rescue.xen.xml.template +++ /dev/null @@ -1,34 +0,0 @@ - - %(name)s - - linux - %(basepath)s/kernel - %(basepath)s/ramdisk - /dev/xvda1 - ro - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template deleted file mode 100644 index da9588049..000000000 --- a/nova/virt/libvirt.uml.xml.template +++ /dev/null @@ -1,26 +0,0 @@ - - ${name} - ${memory_kb} - - ${type} - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template deleted file mode 100644 index 8f650e512..000000000 --- a/nova/virt/libvirt.xen.xml.template +++ /dev/null @@ -1,35 +0,0 @@ - - ${name} - - linux -#if $getVar('kernel', None) - ${kernel} - #if $getVar('ramdisk', None) - ${ramdisk} - #end if - root=/dev/vda1 console=ttyS0 -#end if - /dev/xvda1 - ro - - - - - ${memory_kb} - ${vcpus} - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..13d087330 --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,76 @@ + + ${name} + ${memory_kb} + +#if $type == 'uml' 
+ #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda1 +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda1 + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/rescue-kernel + ${basepath}/rescue-ramdisk + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda1 console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + +#if $getVar('rescue', False) + + + + + + + + +#else + + + + +#end if + + + + + + + + + + + + + + + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 81dbbaad5..2865c18ac 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN. :libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm). :libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). -:libvirt_xml_template: Libvirt XML Template (QEmu/KVM). -:libvirt_xen_xml_template: Libvirt XML Template (Xen). -:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux). -:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU). -:libvirt_rescue_xen_xml_template: XML templage for rescue mode (XEN). -:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML). +:libvirt_xml_template: Libvirt XML Template. :rescue_image_id: Rescue ami image (default: ami-rescue). :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). @@ -70,31 +65,13 @@ libxml2 = None FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_rescue_xml_template', - utils.abspath('virt/libvirt.rescue.qemu.xml.template'), - 'Libvirt RESCUE XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_rescue_xen_xml_template', - utils.abspath('virt/libvirt.rescue.xen.xml.template'), - 'Libvirt RESCUE XML Template for xen') -flags.DEFINE_string('libvirt_rescue_uml_xml_template', - utils.abspath('virt/libvirt.rescue.uml.xml.template'), - 'Libvirt RESCUE XML Template for user-mode-linux') # TODO(vish): These flags should probably go into a shared location flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt.qemu.xml.template'), - 'Libvirt XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_xen_xml_template', - utils.abspath('virt/libvirt.xen.xml.template'), - 'Libvirt XML Template for Xen') -flags.DEFINE_string('libvirt_uml_xml_template', - utils.abspath('virt/libvirt.uml.xml.template'), - 'Libvirt XML Template for user-mode-linux') -flags.DEFINE_string('injected_network_template', - utils.abspath('virt/interfaces.template'), - 'Template file for injected network') + utils.abspath('virt/libvirt.xml.template'), + 'Libvirt XML Template') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -122,12 +99,9 @@ def get_connection(read_only): class LibvirtConnection(object): def __init__(self, read_only): - (self.libvirt_uri, - template_file, - rescue_file) = self.get_uri_and_templates() + self.libvirt_uri = self.get_uri() - self.libvirt_xml = open(template_file).read() - self.rescue_xml = open(rescue_file).read() + 
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -150,20 +124,14 @@ class LibvirtConnection(object): return False raise - def get_uri_and_templates(self): + def get_uri(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' - template_file = FLAGS.libvirt_uml_xml_template - rescue_file = FLAGS.libvirt_rescue_uml_xml_template elif FLAGS.libvirt_type == 'xen': uri = FLAGS.libvirt_uri or 'xen:///' - template_file = FLAGS.libvirt_xen_xml_template - rescue_file = FLAGS.libvirt_rescue_xen_xml_template else: uri = FLAGS.libvirt_uri or 'qemu:///system' - template_file = FLAGS.libvirt_xml_template - rescue_file = FLAGS.libvirt_rescue_xml_template - return uri, template_file, rescue_file + return uri def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], @@ -543,18 +511,16 @@ class LibvirtConnection(object): 'bridge_name': network['bridge'], 'mac_address': instance['mac_address'], 'ip_address': ip_address, - 'dhcp_server': dhcp_server} - if rescue: - xml = self.rescue_xml % xml_info - else: - if xml_info['kernel_id']: + 'dhcp_server': dhcp_server, + 'rescue': rescue} + if not rescue: + if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" - if xml_info['ramdisk_id']: + if instance['ramdisk_id']: xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" - if xml_info['ramdisk_id'] or xml_info['kernel_id']: - xml_info['disk'] = xml_info['basepath'] + "/disk" + xml_info['disk'] = xml_info['basepath'] + "/disk" xml = str(Template(self.libvirt_xml, searchList=[xml_info])) logging.debug('instance %s: finished toXML method', instance['name']) -- cgit From fa7d288e6af3d997d6275d9e6778e932be9f1c3f Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 14 Dec 2010 17:56:42 -0400 Subject: pep8 --- nova/virt/fake.py | 1 + nova/virt/libvirt_conn.py | 5 +++-- nova/virt/xenapi/network_utils.py | 1 + nova/virt/xenapi/vm_utils.py | 1 + nova/virt/xenapi/vmops.py | 1 + nova/virt/xenapi/volumeops.py | 1 + nova/virt/xenapi_conn.py | 2 ++ 7 files changed, 10 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index c56907175..59acabc21 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -259,5 +259,6 @@ class FakeConnection(object): class FakeInstance(object): + def __init__(self): self._state = power_state.NOSTATE diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 97ff49a10..5939f0afe 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -119,6 +119,7 @@ def get_connection(read_only): class LibvirtConnection(object): + def __init__(self, read_only): (self.libvirt_uri, template_file, @@ -298,12 +299,12 @@ class LibvirtConnection(object): @exception.wrap_exception def pause(self, instance, callback): raise exception.APIError("pause not supported for libvirt.") - + @defer.inlineCallbacks @exception.wrap_exception def unpause(self, instance, callback): raise exception.APIError("unpause not supported for libvirt.") - + @defer.inlineCallbacks @exception.wrap_exception def rescue(self, instance): diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 8cb4cce3a..cffaf7f23 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -27,6 +27,7 @@ class NetworkHelper(): """ The class that wraps the helper methods together. 
""" + def __init__(self): return diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 77edb576e..c17dc0bed 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -49,6 +49,7 @@ class VMHelper(): """ The class that wraps the helper methods together. """ + def __init__(self): return diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7c5db0b73..405a8518e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -36,6 +36,7 @@ class VMOps(object): """ Management class for VM-related tasks """ + def __init__(self, session): global XenAPI if XenAPI is None: diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index a4c7a3861..1943ccab0 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -20,6 +20,7 @@ Management class for Storage-related functions (attach, detach, etc). class VolumeOps(object): + def __init__(self, session): self._session = session diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index ebd572258..fa87bb779 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -101,6 +101,7 @@ def get_connection(_): class XenAPIConnection(object): """ A connection to XenServer or Xen Cloud Platform """ + def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) self._vmops = VMOps(session) @@ -155,6 +156,7 @@ class XenAPIConnection(object): class XenAPISession(object): """ The session to invoke XenAPI SDK calls """ + def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) -- cgit From aab6a89ba1e9ace73dcb4fa68a67957e29c47f84 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Tue, 14 Dec 2010 23:34:08 +0100 Subject: Don't attempt to fiddle with partitions for whole-disk-images. 
--- nova/virt/libvirt_conn.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 5dcb05b1f..3529be333 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -421,13 +421,13 @@ class LibvirtConnection(object): yield images.fetch(inst.image_id, basepath('disk-raw'), user, project) - if inst.kernel_id: + if inst['kernel_id']: if not os.path.exists(basepath('kernel')): - yield images.fetch(inst.kernel_id, basepath('kernel'), + yield images.fetch(inst['kernel_id'], basepath('kernel'), user, project) - if inst.ramdisk_id: + if inst['ramdisk_id']: if not os.path.exists(basepath('ramdisk')): - yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), + yield images.fetch(inst['ramdisk_id'], basepath('ramdisk'), user, project) execute = lambda cmd, process_input = None, check_exit_code = True: \ @@ -439,7 +439,7 @@ class LibvirtConnection(object): # partitioned disk image where the target partition is the first # partition target_partition = None - if not inst.kernel_id: + if not inst['kernel_id']: target_partition = "1" key = str(inst['key_data']) @@ -472,7 +472,7 @@ class LibvirtConnection(object): ' into image %s (%s)', inst['name'], inst.image_id, e) - if inst.kernel_id: + if inst['kernel_id']: if os.path.exists(basepath('disk')): yield process.simple_execute('rm -f %s' % basepath('disk')) @@ -483,8 +483,13 @@ class LibvirtConnection(object): resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False - yield disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + + if inst['kernel_id']: + yield disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) + else: + os.rename(basepath('disk-raw'), basepath('disk')) + yield disk.extend(basepath('disk'), local_bytes, execute=execute) if FLAGS.libvirt_type == 'uml': yield process.simple_execute('sudo chown root %s' % -- cgit From a2a8406b5d793545c8ecb359e18b80bba618c509 Mon Sep 17 00:00:00 2001 From: termie Date: Tue, 14 Dec 2010 16:05:39 -0800 Subject: updates per review --- nova/virt/fake.py | 4 ++-- nova/virt/libvirt_conn.py | 14 ++++++-------- nova/virt/xenapi/network_utils.py | 5 ++--- nova/virt/xenapi/vmops.py | 3 +-- nova/virt/xenapi_conn.py | 6 ++++-- 5 files changed, 15 insertions(+), 17 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 91dc8173b..77bc926c2 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -122,13 +122,13 @@ class FakeConnection(object): """ Rescue the specified instance. """ - return + pass def unrescue(self, instance): """ Unrescue the specified instance. 
""" - return + pass def destroy(self, instance): """ diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 715e7234c..ba51f8f69 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -215,7 +215,7 @@ class LibvirtConnection(object): self._cleanup(instance) done.send() - greenthread.spawn(_wait_for_time) + greenthread.spawn(_wait_for_timer) return done def _cleanup(self, instance): @@ -365,9 +365,9 @@ class LibvirtConnection(object): if virsh_output.startswith('/dev/'): logging.info('cool, it\'s a device') - r = utils.execute("sudo dd if=%s iflag=nonblock" % - virsh_output, check_exit_code=False) - return r[0] + out, err = utils.execute("sudo dd if=%s iflag=nonblock" % + virsh_output, check_exit_code=False) + return out else: return '' @@ -388,8 +388,7 @@ class LibvirtConnection(object): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - utils.execute('sudo chown %d %s' % (os.getuid(), - console_log)) + utils.execute('sudo chown %d %s' % (os.getuid(), console_log)) if FLAGS.libvirt_type == 'xen': # Xen is special @@ -476,7 +475,6 @@ class LibvirtConnection(object): ['local_gb'] * 1024 * 1024 * 1024) - resize = inst['instance_type'] != 'm1.tiny' resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False @@ -743,7 +741,7 @@ class NWFilterFirewall(object): if callable(xml): xml = xml() - # execute in a native thread and block until done + # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) @staticmethod diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index d8632f393..012954394 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -30,10 +30,9 @@ class NetworkHelper(): @classmethod def find_network_with_bridge(cls, session, bridge): - """ Return the network on which the bridge is attached, if found """ + """ Return the network on which the bridge is attached, if found.""" expr = 'field "bridge" = "%s"' % bridge - networks = session.call_xenapi('network.get_all_records_where', - expr) + networks = session.call_xenapi('network.get_all_records_where', expr) if len(networks) == 1: return networks.keys()[0] elif len(networks) > 1: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b6b92b926..3034df9e1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -107,8 +107,7 @@ class VMOps(object): if vdis: for vdi in vdis: try: - task = self._session.call_xenapi('Async.VDI.destroy', - vdi) + task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(task) except XenAPI.Failure, exc: logging.warn(exc) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index df8e42d34..424311133 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -175,9 +175,11 @@ class XenAPISession(object): The task is polled until it completes.""" done = event.Event() - loop = utis.LoopingTask(self._poll_task, task, done) + loop = utils.LoopingTask(self._poll_task, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) - return done.wait() + rv = done.wait() + loop.stop() + return rv def _poll_task(self, task, done): """Poll the given XenAPI task, and fire the given Deferred if we -- cgit From dada56794679b213b2d80e4e1f907a212b73f54e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 15 Dec 2010 17:50:05 +0000 Subject: * code cleanup * revised unittest approach * added stubout 
and a number of tests --- nova/virt/xenapi/__init__.py | 22 +--------------------- nova/virt/xenapi/fake.py | 40 +++------------------------------------- nova/virt/xenapi/vmops.py | 6 ++---- nova/virt/xenapi/volumeops.py | 7 +++---- nova/virt/xenapi_conn.py | 24 +++++++++++------------- 5 files changed, 20 insertions(+), 79 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index 1a2903b98..c7038deae 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,31 +20,11 @@ """ -def load_sdk(flags): - """ - This method is used for loading the XenAPI SDK (fake or real) - """ - xenapi_module = \ - flags.xenapi_use_fake_session and 'nova.virt.xenapi.fake' or 'XenAPI' - from_list = \ - flags.xenapi_use_fake_session and ['fake'] or [] - - return __import__(xenapi_module, globals(), locals(), from_list, -1) - - class HelperBase(): """ - The class that wraps the helper methods together. + The base for helper classes. This adds the XenAPI class attribute """ XenAPI = None def __init__(self): return - - @classmethod - def late_import(cls, FLAGS): - """ - Load XenAPI module in for helper class - """ - if cls.XenAPI is None: - cls.XenAPI = load_sdk(FLAGS) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index f5fea3cc2..038064e8e 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -154,7 +154,7 @@ class Failure(Exception): def __str__(self): try: return str(self.details) - except Exception, exn: + except Exception, exc: return "XenAPI Fake Failure: %s" % str(self.details) def _details_map(self): @@ -324,8 +324,8 @@ class SessionBase(object): try: task['result'] = self.xenapi_request(func, params[1:]) task['status'] = 'success' - except Failure, exn: - task['error_info'] = exn.details + except Failure, exc: + task['error_info'] = exc.details task['status'] = 'failed' task['finished'] = datetime.datetime.now() return task_ref @@ -372,37 +372,3 @@ class _Dispatcher: def __call__(self, *args): return self.__send(self.__name, args) - - -class FakeSession(SessionBase): - def __init__(self, uri): - super(FakeSession, self).__init__(uri) - - def network_get_all_records_where(self, _1, _2): - return self.xenapi.network.get_all_records() - - def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' - - def VM_start(self, _1, ref, _2, _3): - vm = get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - - def VBD_plug(self, _1, _2): - #FIXME(armando):make proper plug - pass - - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - #FIXME(armando):make proper introduce - valid_vdi = False - refs = get_all('VDI') - for ref in refs: - rec = get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise Failure([['INVALID_VDI', 'session', self._session]]) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 36b8fecc2..abaa1f5f1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -28,7 +28,6 @@ from nova import flags from nova import exception from nova.auth.manager import AuthManager -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper @@ -38,10 +37,9 @@ class VMOps(object): Management class for VM-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = 
session.get_imported_xenapi() self._session = session - # Load XenAPI module in the helper class - VMHelper.late_import(flags.FLAGS) + VMHelper.XenAPI = self.XenAPI def list_instances(self): """ List VM instances """ diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 1b337a6ed..68806c4c2 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -22,7 +22,6 @@ import logging from twisted.internet import defer from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError @@ -33,11 +32,11 @@ class VolumeOps(object): Management class for Volume-related tasks """ def __init__(self, session): - self.XenAPI = load_sdk(flags.FLAGS) + self.XenAPI = session.get_imported_xenapi() self._session = session # Load XenAPI module in the helper classes respectively - VolumeHelper.late_import(flags.FLAGS) - VMHelper.late_import(flags.FLAGS) + VolumeHelper.XenAPI = self.XenAPI + VMHelper.XenAPI = self.XenAPI @defer.inlineCallbacks def attach_volume(self, instance_name, device_path, mountpoint): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 5f2b9c7c6..a7e3a6723 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -36,7 +36,6 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. **Related Flags** -:xenapi_use_fake_session: To be set for unit testing :xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform. :xenapi_connection_username: Username for connection to XenServer/Xen Cloud Platform (default: root). @@ -59,15 +58,11 @@ from twisted.internet import reactor from nova import utils from nova import flags -from nova.virt.xenapi import load_sdk from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps FLAGS = flags.FLAGS -flags.DEFINE_boolean('xenapi_use_fake_session', - False, - 'Set to true in order to use the fake XenAPI SDK') flags.DEFINE_string('xenapi_connection_url', None, 'URL for connection to XenServer/Xen Cloud Platform.' @@ -159,15 +154,14 @@ class XenAPIConnection(object): class XenAPISession(object): """ The session to invoke XenAPI SDK calls """ def __init__(self, url, user, pw): - # This is loaded late so that there's no need to install this - # library when not using XenAPI. - self.XenAPI = load_sdk(FLAGS) - if FLAGS.xenapi_use_fake_session: - self._session = self.XenAPI.FakeSession(url) - else: - self._session = self.XenAPI.Session(url) + self.XenAPI = self.get_imported_xenapi() + self._session = self._create_session(url) self._session.login_with_password(user, pw) + def get_imported_xenapi(self): + """Stubout point. This can be replaced with a mock xenapi module.""" + return __import__('XenAPI') + def get_xenapi(self): """ Return the xenapi object """ return self._session.xenapi @@ -200,6 +194,10 @@ class XenAPISession(object): reactor.callLater(0, self._poll_task, task, d) return d + def _create_session(self, url): + """Stubout point. 
This can be replaced with a mock session.""" + return self.XenAPI.Session(url) + @utils.deferredToThread def _poll_task(self, task, deferred): """Poll the given XenAPI task, and fire the given Deferred if we @@ -220,7 +218,7 @@ class XenAPISession(object): error_info) deferred.errback(self.XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) - except self.XenAPI.Failure, exc: + except Exception, exc: logging.warn(exc) deferred.errback(exc) -- cgit From fdf067037981c2b4b4501258919af0f9e1d0ec26 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 15 Dec 2010 10:38:30 -0800 Subject: add missing import --- nova/virt/xenapi_conn.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 424311133..a88101ad0 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -51,6 +51,8 @@ import logging import sys import xmlrpclib +from eventlet import event + from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps -- cgit From 9a8113584edc9a8dbf42e7039b373429c11a7760 Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 15 Dec 2010 11:53:18 -0800 Subject: fixes for xenapi (thanks sandywalsh) --- nova/virt/xenapi_conn.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index a88101ad0..09d399da4 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -52,6 +52,7 @@ import sys import xmlrpclib from eventlet import event +from eventlet import tpool from nova import utils from nova import flags @@ -164,20 +165,20 @@ class XenAPISession(object): f = self._session.xenapi for m in method.split('.'): f = f.__getattr__(m) - return f(*args) + return tpool.execute(f, *args) def async_call_plugin(self, plugin, fn, args): """Call Async.host.call_plugin on a background thread.""" - return _unwrap_plugin_exceptions( - self._session.xenapi.Async.host.call_plugin, - self.get_xenapi_host(), plugin, fn, args) + return tpool.execute(_unwrap_plugin_exceptions, + self._session.xenapi.Async.host.call_plugin, + self.get_xenapi_host(), plugin, fn, args) def wait_for_task(self, task): """Return a Deferred that will give the result of the given task. 
The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingTask(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() -- cgit From a87b4081c6617ba193836ad12008204d62814549 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 15 Dec 2010 18:23:51 -0400 Subject: fixup after merge with trunk --- nova/virt/libvirt_conn.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d7abc874a..d4d616b31 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -295,12 +295,10 @@ class LibvirtConnection(object): def pause(self, instance, callback): raise exception.APIError("pause not supported for libvirt.") - @defer.inlineCallbacks @exception.wrap_exception def unpause(self, instance, callback): raise exception.APIError("unpause not supported for libvirt.") - @defer.inlineCallbacks @exception.wrap_exception def rescue(self, instance): self.destroy(instance, False) -- cgit From 8152acf7c3df83a04591fdafb21201965da7bfad Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 17:47:48 +0000 Subject: fake session clean-up --- nova/virt/xenapi/fake.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 038064e8e..a46f3fd80 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -105,9 +105,10 @@ def create_vdi(name_label, read_only, sr_ref, sharable): }) -def create_pbd(config, attached): +def create_pbd(config, sr_ref, attached): return _create_object('PBD', { 'device-config': config, + 'SR': sr_ref, 'currently-attached': attached, }) @@ -126,6 +127,21 @@ def _create_object(table, obj): return ref +def _create_sr(table, obj): + sr_type = obj[6] + # Forces fake to support iscsi only + if sr_type != 'iscsi': + raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) + sr_ref = _create_object(table, obj[2]) + vdi_ref = create_vdi('', False, sr_ref, False) + pbd_ref = create_pbd('', sr_ref, True) + _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + _db_content['VDI'][vdi_ref]['SR'] = sr_ref + _db_content['PBD'][pbd_ref]['SR'] = sr_ref + return sr_ref + + def get_all(table): return _db_content[table].keys() @@ -296,19 +312,13 @@ class SessionBase(object): def _create(self, name, params): self._check_session(params) - expected = 2 - if name == 'SR.create': - expected = 10 + is_sr_create = name == 'SR.create' + # Storage Repositories have a different API + expected = is_sr_create and 10 or 2 self._check_arg_count(params, expected) (cls, _) = name.split('.') - if name == 'SR.create': - vdi_ref = create_vdi('', False, '', False) - pbd_ref = create_pbd('', True) - params[2]['VDIs'] = [vdi_ref] - params[2]['PBDs'] = [pbd_ref] - ref = _create_object(cls, params[2]) - else: - ref = _create_object(cls, params[1]) + ref = is_sr_create and \ + _create_sr(cls, params) or _create_object(cls, params[1]) obj = get_record(cls, ref) # Add RO fields -- cgit From db96fd559d28bcfdf8cc29d79b9afca6dea1cfb7 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 16 Dec 2010 18:44:42 +0000 Subject: reviewed the FIXMEs, and spotted an uncaught exception in volume_utils...yay! 
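The fix follows the pattern already used a few lines above for the VDI record lookup: let the low-level XenAPI.Failure be caught, log it, and re-raise the driver's own StorageError so callers only deal with one exception type. A stripped-down sketch of that wrap-and-translate idiom; FakeFailure and do_introduce are illustrative stand-ins for XenAPI.Failure and the real VDI.introduce call.

# Stripped-down sketch of the wrap-and-translate idiom; FakeFailure stands in
# for XenAPI.Failure and do_introduce for the real VDI.introduce call.
import logging


class StorageError(Exception):
    """Driver-level error raised when a storage operation fails."""
    pass


class FakeFailure(Exception):
    """Stand-in for XenAPI.Failure in this sketch."""
    pass


def introduce_vdi(do_introduce, sr_ref):
    try:
        return do_introduce()
    except FakeFailure, exc:
        # Keep the SDK's details in the log, but give callers a single
        # driver-level exception type to handle.
        logging.warn(exc)
        raise StorageError('Unable to introduce VDI for SR %s' % sr_ref)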
--- nova/virt/xenapi/fake.py | 1 + nova/virt/xenapi/volume_utils.py | 29 +++++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index a46f3fd80..7877b5905 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -102,6 +102,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable): 'location': '', 'xenstore_data': '', 'sm_config': {}, + 'VBDs': {}, }) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 4482e465c..8d1d6fb81 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -152,18 +152,23 @@ class VolumeHelper(HelperBase): logging.warn(exc) raise StorageError('Unable to get record of VDI %s on' % vdis[0]) else: - return session.get_xenapi().VDI.introduce( - vdi_rec['uuid'], - vdi_rec['name_label'], - vdi_rec['name_description'], - vdi_rec['SR'], - vdi_rec['type'], - vdi_rec['sharable'], - vdi_rec['read_only'], - vdi_rec['other_config'], - vdi_rec['location'], - vdi_rec['xenstore_data'], - vdi_rec['sm_config']) + try: + vdi_ref = session.get_xenapi().VDI.introduce( + vdi_rec['uuid'], + vdi_rec['name_label'], + vdi_rec['name_description'], + vdi_rec['SR'], + vdi_rec['type'], + vdi_rec['sharable'], + vdi_rec['read_only'], + vdi_rec['other_config'], + vdi_rec['location'], + vdi_rec['xenstore_data'], + vdi_rec['sm_config']) + except cls.XenAPI.Failure, exc: + logging.warn(exc) + raise StorageError('Unable to introduce VDI for SR %s' + % sr_ref) @classmethod @defer.inlineCallbacks -- cgit From 9b049acc27d477a1ab9e13c9e064e59d8bd0a3ae Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 16 Dec 2010 10:52:30 -0800 Subject: pep8 fixes --- nova/virt/libvirt_conn.py | 12 ++++++------ nova/virt/xenapi_conn.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ba51f8f69..5a8c71850 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -205,7 +205,7 @@ class LibvirtConnection(object): timer.f = _wait_for_shutdown timer_done = timer.start(interval=0.5, now=True) - + # NOTE(termie): this is strictly superfluous (we could put the # cleanup code in the timer), but this emulates the # previous model so I am keeping it around until @@ -387,7 +387,7 @@ class LibvirtConnection(object): def get_console_output(self, instance): console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log') - + utils.execute('sudo chown %d %s' % (os.getuid(), console_log)) if FLAGS.libvirt_type == 'xen': @@ -439,11 +439,11 @@ class LibvirtConnection(object): if not os.path.exists(basepath('ramdisk')): images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project) - + def execute(cmd, process_input=None, check_exit_code=True): - return utils.execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) key = str(inst['key_data']) net = None diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 09d399da4..6beb08f5e 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -200,7 +200,7 @@ class XenAPISession(object): error_info = self._session.xenapi.task.get_error_info(task) logging.warn('Task %s status: %s. 
%s', task, status, error_info) - done.send_exception(XenAPI.Failure(error_info)) + done.send_exception(XenAPI.Failure(error_info)) #logging.debug('Polling task %s done.', task) except XenAPI.Failure, exc: logging.warn(exc) -- cgit From 611935aa3e3a66e9638b0c127041a6fca4788b9c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:03:37 -0600 Subject: Put flags back in vm_utils --- nova/virt/xenapi/vm_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index dde138404..b83ae9475 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,15 +21,20 @@ their attributes like VDIs, VIFs, as well as their lookup functions. import logging import urllib + from xml.dom import minidom +from nova import flags from nova import utils + from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +FLAGS = flags.FLAGS + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -37,7 +42,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SHUTDOWN, # FIXME 'Crashed': power_state.CRASHED} - XenAPI = None -- cgit From e5a3d993cb13c8dc5e984a67521f77ce8fdf8e4c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:19:35 -0600 Subject: Removed unnecessary blank lines --- nova/virt/xenapi/vm_utils.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index b83ae9475..2f5d78e75 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -21,12 +21,10 @@ their attributes like VDIs, VIFs, as well as their lookup functions. 
import logging import urllib - from xml.dom import minidom from nova import flags from nova import utils - from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state -- cgit From 7954862c8133bacd5e612864a26e7d0ae9b0d663 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 16 Dec 2010 15:54:38 -0600 Subject: Added Instance Diagnostics DB model --- nova/virt/xenapi/vm_utils.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2f5d78e75..659559c31 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -228,11 +228,7 @@ class VMHelper(): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] - metrics = session.get_xenapi().VM_guest_metrics.get_record( - record["guest_metrics"]) - diags = { - "Kernel": metrics["os_version"]["uname"], - "Distro": metrics["os_version"]["name"]} + diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: rrd = minidom.parseString(xml) -- cgit From ca81b0c12a3853942e9ce85154c38dad381ead0e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 18 Dec 2010 00:50:49 +0000 Subject: fixed unittests and further clean-up post-eventlet merge --- nova/virt/xenapi/__init__.py | 2 +- nova/virt/xenapi/network_utils.py | 2 +- nova/virt/xenapi/vm_utils.py | 3 +-- nova/virt/xenapi/vmops.py | 8 +++++--- nova/virt/xenapi/volume_utils.py | 2 +- nova/virt/xenapi/volumeops.py | 1 - nova/virt/xenapi_conn.py | 8 ++++---- 7 files changed, 13 insertions(+), 13 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py index c7038deae..c75162f08 100644 --- a/nova/virt/xenapi/__init__.py +++ b/nova/virt/xenapi/__init__.py @@ -20,7 +20,7 @@ """ -class HelperBase(): +class HelperBase(object): """ The base for helper classes. This adds the XenAPI class attribute """ diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index c292383b6..e783120fe 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -29,7 +29,7 @@ class NetworkHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(NetworkHelper, self).__init__() @classmethod def find_network_with_bridge(cls, session, bridge): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 57d419773..911fcc9b2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -24,7 +24,6 @@ import urllib from xml.dom import minidom from nova import flags -from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state @@ -48,7 +47,7 @@ class VMHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(VMHelper, self).__init__() @classmethod def create_vm(cls, session, instance, kernel, ramdisk): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0be4ed07d..aa3a3ae53 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -22,7 +22,6 @@ import logging from nova import db from nova import context -from nova import flags from nova import exception from nova import utils @@ -78,6 +77,7 @@ class VMOps(object): logging.info('Spawning VM %s created %s.', instance.name, vm_ref) + # NOTE(armando): Do we really need to do this in virt? 
timer = utils.LoopingCall(f=None) def _wait_for_boot(): @@ -88,7 +88,8 @@ class VMOps(object): if state == power_state.RUNNING: logging.debug('Instance %s: booted', instance['name']) timer.stop() - except: + except Exception, exc: + logging.warn(exc) logging.exception('instance %s: failed to boot', instance['name']) db.instance_set_state(context.get_admin_context(), @@ -131,6 +132,7 @@ class VMOps(object): self._session.wait_for_task(task) except self.XenAPI.Failure, exc: logging.warn(exc) + # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(task) @@ -149,7 +151,7 @@ class VMOps(object): """Return data about VM diagnostics""" vm = VMHelper.lookup(self._session, instance_id) if vm is None: - raise exception.Exception("Instance not found %s" % instance_id) + raise exception.NotFound("Instance not found %s" % instance_id) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_diagnostics(self._session, rec) diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 255f23d88..5366078ce 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -43,7 +43,7 @@ class VolumeHelper(HelperBase): The class that wraps the helper methods together. """ def __init__(self): - return + super(VolumeHelper, self).__init__() @classmethod def create_iscsi_storage(cls, session, info, label, description): diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 6c7516073..9dbb1bb14 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -19,7 +19,6 @@ Management class for Storage-related functions (attach, detach, etc). """ import logging -from nova import flags from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 207222744..2a8614cfd 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -119,11 +119,11 @@ class XenAPIConnection(object): def spawn(self, instance): """ Create VM instance """ - return self._vmops.spawn(instance) + self._vmops.spawn(instance) def reboot(self, instance): """ Reboot VM instance """ - return self._vmops.reboot(instance) + self._vmops.reboot(instance) def destroy(self, instance): """ Destroy VM instance """ @@ -143,13 +143,13 @@ class XenAPIConnection(object): def attach_volume(self, instance_name, device_path, mountpoint): """ Attach volume storage to VM instance """ - return self._volumeops.attach_volume(instance_name, + self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): """ Detach volume storage to VM instance """ - return self._volumeops.detach_volume(instance_name, mountpoint) + self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): -- cgit From a2019a14f7e7902c0bfef9fe3e9b576d9f45defe Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 03:39:28 +0000 Subject: add missing greenthread import --- nova/virt/libvirt_conn.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 5a8c71850..feef1390c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -45,6 +45,7 @@ import logging import os import shutil +from eventlet import greenthread from eventlet import event from eventlet import tpool -- cgit 
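The eventlet changes threaded through the patches above all follow one pattern: blocking XenAPI/libvirt calls are pushed onto the thread pool with tpool.execute(), and completion is signalled back to green threads through an event.Event that a polling loop (utils.LoopingCall in Nova) fires when the task leaves the pending state. A minimal standalone sketch of that pattern, using only eventlet itself — fetch_status and poll_interval here are illustrative stand-ins for the XenAPI task API and FLAGS.xenapi_task_poll_interval, not Nova code:

import eventlet
from eventlet import event
from eventlet import tpool


def wait_for_task(fetch_status, poll_interval=0.5):
    """Poll a blocking status callable and return its result.

    fetch_status is assumed to return ('pending', None) until the task
    finishes, then ('success', result). It is run via tpool.execute()
    so the blocking call cannot stall the eventlet hub.
    """
    done = event.Event()

    def _poll():
        while not done.ready():
            status, result = tpool.execute(fetch_status)
            if status == 'success':
                done.send(result)
                break
            eventlet.sleep(poll_interval)

    eventlet.spawn(_poll)
    return done.wait()

Nova's version layers instance-action logging and error handling on top of this loop, but the control flow is the same: spawn the poller, block the caller on done.wait(), and let the poller call done.send() or done.send_exception() once the task is no longer pending.

-- cgit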
From d2eb04cea6b7f0a669758fc1fba32e77a008a7eb Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 11:42:25 -0600 Subject: PEP8 cleanup --- nova/virt/xenapi/vmops.py | 16 ++++++++-------- nova/virt/xenapi_conn.py | 30 +++++++++++++++--------------- 2 files changed, 23 insertions(+), 23 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index a18eacf07..bedf131df 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -44,12 +44,12 @@ class VMOps(object): VMHelper.late_import() def list_instances(self): - """ List VM instances """ + """List VM instances""" return [self._session.get_xenapi().VM.get_name_label(vm) \ for vm in self._session.get_xenapi().VM.get_all()] def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % @@ -81,7 +81,7 @@ class VMOps(object): vm_ref) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: @@ -90,7 +90,7 @@ class VMOps(object): self._session.wait_for_task(task) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. This lets us clean up instances @@ -127,7 +127,7 @@ class VMOps(object): callback(ret) def pause(self, instance, callback): - """ Pause VM instance """ + """Pause VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: @@ -136,7 +136,7 @@ class VMOps(object): self._wait_with_callback(task, callback) def unpause(self, instance, callback): - """ Unpause VM instance """ + """Unpause VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: @@ -145,7 +145,7 @@ class VMOps(object): self._wait_with_callback(task, callback) def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) @@ -161,6 +161,6 @@ class VMOps(object): return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" # TODO: implement this to fix pylint! 
return 'FAKE CONSOLE OUTPUT of instance' diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 21ed2cd65..3a9084d89 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -101,7 +101,7 @@ def get_connection(_): class XenAPIConnection(object): - """ A connection to XenServer or Xen Cloud Platform """ + """A connection to XenServer or Xen Cloud Platform""" def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) @@ -109,31 +109,31 @@ class XenAPIConnection(object): self._volumeops = VolumeOps(session) def list_instances(self): - """ List VM instances """ + """List VM instances""" return self._vmops.list_instances() def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" self._vmops.spawn(instance) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" self._vmops.reboot(instance) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" self._vmops.destroy(instance) def pause(self, instance, callback): - """ Pause VM instance """ + """Pause VM instance""" self._vmops.pause(instance, callback) def unpause(self, instance, callback): - """ Unpause paused VM instance """ + """Unpause paused VM instance""" self._vmops.unpause(instance, callback) def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" return self._vmops.get_info(instance_id) def get_diagnostics(self, instance_id): @@ -141,33 +141,33 @@ class XenAPIConnection(object): return self._vmops.get_diagnostics(instance_id) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" return self._vmops.get_console_output(instance) def attach_volume(self, instance_name, device_path, mountpoint): - """ Attach volume storage to VM instance """ + """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): - """ Detach volume storage to VM instance """ + """Detach volume storage to VM instance""" return self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): - """ The session to invoke XenAPI SDK calls """ + """The session to invoke XenAPI SDK calls""" def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): - """ Return the xenapi object """ + """Return the xenapi object""" return self._session.xenapi def get_xenapi_host(self): - """ Return the xenapi host """ + """Return the xenapi host""" return self._session.xenapi.session.get_this_host(self._session.handle) def call_xenapi(self, method, *args): @@ -218,7 +218,7 @@ class XenAPISession(object): def _unwrap_plugin_exceptions(func, *args, **kwargs): - """ Parse exception details """ + """Parse exception details""" try: return func(*args, **kwargs) except XenAPI.Failure, exc: -- cgit From aa0639b00c3cd4b7bd5dd7dc9027e86d0f43150a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 18:57:11 +0000 Subject: change virtualization to not get network through project --- nova/virt/libvirt_conn.py | 4 ++-- nova/virt/xenapi/vmops.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index feef1390c..9be363661 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -488,8 +488,8 @@ class LibvirtConnection(object): 
def to_xml(self, instance, rescue=False): # TODO(termie): cache? logging.debug('instance %s: starting toXML method', instance['name']) - network = db.project_get_network(context.get_admin_context(), - instance['project_id']) + network = db.network_get_by_instance(context.get_admin_context(), + instance['id']) # FIXME(vish): stick this in db instance_type = instance['instance_type'] instance_type = instance_types.INSTANCE_TYPES[instance_type] diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3034df9e1..de40815e2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -54,8 +54,8 @@ class VMOps(object): raise Exception('Attempted to create non-unique name %s' % instance.name) - bridge = db.project_get_network(context.get_admin_context(), - instance.project_id).bridge + bridge = db.network_get_by_instance(context.get_admin_context(), + instance['id'])['bridge'] network_ref = \ NetworkHelper.find_network_with_bridge(self._session, bridge) -- cgit From b3fce81e384aec46c0963db1f144cc58d02340a4 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 14:13:18 -0600 Subject: Log all XenAPI actions --- nova/virt/xenapi/vmops.py | 16 ++++++++-------- nova/virt/xenapi_conn.py | 33 +++++++++++++++++++++++---------- 2 files changed, 31 insertions(+), 18 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index bedf131df..5b9495b67 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -87,7 +87,7 @@ class VMOps(object): if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) def destroy(self, instance): """Destroy VM instance""" @@ -101,7 +101,7 @@ class VMOps(object): try: task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up @@ -109,19 +109,19 @@ class VMOps(object): for vdi in vdis: try: task = self._session.call_xenapi('Async.VDI.destroy', vdi) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) try: task = self._session.call_xenapi('Async.VM.destroy', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) - def _wait_with_callback(self, task, callback): + def _wait_with_callback(self, instance_id, task, callback): ret = None try: - ret = self._session.wait_for_task(task) + ret = self._session.wait_for_task(instance_id, task) except XenAPI.Failure, exc: logging.warn(exc) callback(ret) @@ -133,7 +133,7 @@ class VMOps(object): if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.pause', vm) - self._wait_with_callback(task, callback) + self._wait_with_callback(instance.id, task, callback) def unpause(self, instance, callback): """Unpause VM instance""" @@ -142,7 +142,7 @@ class VMOps(object): if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.unpause', vm) - self._wait_with_callback(task, callback) + self._wait_with_callback(instance.id, task, callback) def get_info(self, instance_id): """Return data about VM instance""" diff --git a/nova/virt/xenapi_conn.py 
b/nova/virt/xenapi_conn.py index 3a9084d89..33a55d7b2 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -54,6 +54,8 @@ import xmlrpclib from eventlet import event from eventlet import tpool +from nova import context +from nova import db from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps @@ -183,35 +185,46 @@ class XenAPISession(object): self._session.xenapi.Async.host.call_plugin, self.get_xenapi_host(), plugin, fn, args) - def wait_for_task(self, task): + def wait_for_task(self, instance_id, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingCall(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, instance_id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() return rv - def _poll_task(self, task, done): + def _poll_task(self, instance_id, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) + name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) - if status == 'pending': + action = dict( + instance_id=int(instance_id), + action=name, + error=None) + if status == "pending": return - elif status == 'success': + elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) + logging.info("Task [%s] %s status: success %s" % ( + name, + task, + result)) done.send(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, - error_info) + action["error"] = str(error_info) + logging.warn("Task [%s] %s status: %s %s" % ( + name, + task, + status, + error_info)) done.send_exception(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + db.instance_action_create(context.get_admin_context(), action) except XenAPI.Failure, exc: logging.warn(exc) done.send_exception(*sys.exc_info()) -- cgit From 555bea30cddfd32c42b6d7453b5afd2e7fcfb7f2 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 21 Dec 2010 15:46:44 -0600 Subject: Filter templates and dom0 from list_instances() --- nova/virt/xenapi/vmops.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5b9495b67..3b00ce8bf 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -45,8 +45,12 @@ class VMOps(object): def list_instances(self): """List VM instances""" - return [self._session.get_xenapi().VM.get_name_label(vm) \ - for vm in self._session.get_xenapi().VM.get_all()] + vms = [] + for vm in self._session.get_xenapi().VM.get_all(): + rec = self._session.get_xenapi().VM.get_record(vm) + if not rec["is_a_template"] and not rec["is_control_domain"]: + vms.append(rec["name_label"]) + return vms def spawn(self, instance): """Create VM instance""" -- cgit From f0195ebfd2cc56cee5797fff19fb9702c51df51b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 21 Dec 2010 21:47:13 +0000 Subject: fix reboot command to work even if a host is rebooted --- nova/virt/libvirt_conn.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ad101db2a..845167d9f 100644 --- 
a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -40,6 +40,7 @@ import logging import os import shutil +from eventlet import greenthread from eventlet import event from eventlet import tpool @@ -183,7 +184,8 @@ class LibvirtConnection(object): # everything has been vetted a bit def _wait_for_timer(): timer_done.wait() - self._cleanup(instance) + if cleanup: + self._cleanup(instance) done.send() greenthread.spawn(_wait_for_timer) -- cgit From 81191660cf6d1e5ea47630ed45041dc923f6b57a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 20:59:53 +0000 Subject: merge trunk and upgrade to cheetah templating --- nova/virt/connection.py | 2 +- nova/virt/fake.py | 16 ++- nova/virt/libvirt.qemu.xml.template | 34 ----- nova/virt/libvirt.rescue.qemu.xml.template | 38 ------ nova/virt/libvirt.rescue.qemu.xml.template.THIS | 38 ++++++ nova/virt/libvirt.rescue.uml.xml.template | 31 ----- nova/virt/libvirt.rescue.uml.xml.template.THIS | 31 +++++ nova/virt/libvirt.rescue.xen.xml.template | 34 ----- nova/virt/libvirt.uml.xml.template | 27 ---- nova/virt/libvirt.uml.xml.template.THIS | 27 ++++ nova/virt/libvirt.xen.xml.template | 30 ----- nova/virt/libvirt.xml.template | 79 +++++++++++ nova/virt/libvirt_conn.py | 168 +++++++++++++----------- nova/virt/xenapi/network_utils.py | 1 + nova/virt/xenapi/vm_utils.py | 7 +- nova/virt/xenapi/vmops.py | 55 ++++++-- nova/virt/xenapi/volumeops.py | 1 + nova/virt/xenapi_conn.py | 81 ++++++++---- 18 files changed, 380 insertions(+), 320 deletions(-) delete mode 100644 nova/virt/libvirt.qemu.xml.template delete mode 100644 nova/virt/libvirt.rescue.qemu.xml.template create mode 100644 nova/virt/libvirt.rescue.qemu.xml.template.THIS delete mode 100644 nova/virt/libvirt.rescue.uml.xml.template create mode 100644 nova/virt/libvirt.rescue.uml.xml.template.THIS delete mode 100644 nova/virt/libvirt.rescue.xen.xml.template delete mode 100644 nova/virt/libvirt.uml.xml.template create mode 100644 nova/virt/libvirt.uml.xml.template.THIS delete mode 100644 nova/virt/libvirt.xen.xml.template create mode 100644 nova/virt/libvirt.xml.template (limited to 'nova/virt') diff --git a/nova/virt/connection.py b/nova/virt/connection.py index c40bb4bb4..61e99944e 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -66,6 +66,6 @@ def get_connection(read_only=False): raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error('Failed to open connection to the hypervisor') + logging.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 76a04f18f..238acf798 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -136,6 +136,18 @@ class FakeConnection(object): """ pass + def pause(self, instance, callback): + """ + Pause the specified instance. + """ + pass + + def unpause(self, instance, callback): + """ + Unpause the specified instance. + """ + pass + def destroy(self, instance): """ Destroy (shutdown and delete) the specified instance. 
@@ -169,7 +181,8 @@ class FakeConnection(object): knowledge of the instance """ if instance_name not in self.instances: - raise exception.NotFound("Instance %s Not Found" % instance_name) + raise exception.NotFound(_("Instance %s Not Found") + % instance_name) i = self.instances[instance_name] return {'state': i._state, 'max_mem': 0, @@ -249,5 +262,6 @@ class FakeConnection(object): class FakeInstance(object): + def __init__(self): self._state = power_state.NOSTATE diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template deleted file mode 100644 index d90afea81..000000000 --- a/nova/virt/libvirt.qemu.xml.template +++ /dev/null @@ -1,34 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/kernel - %(basepath)s/ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - %(extra_params)s - - - - - - - - diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template deleted file mode 100644 index a3b88106c..000000000 --- a/nova/virt/libvirt.rescue.qemu.xml.template +++ /dev/null @@ -1,38 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/rescue-kernel - %(basepath)s/rescue-ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - - diff --git a/nova/virt/libvirt.rescue.qemu.xml.template.THIS b/nova/virt/libvirt.rescue.qemu.xml.template.THIS new file mode 100644 index 000000000..a3b88106c --- /dev/null +++ b/nova/virt/libvirt.rescue.qemu.xml.template.THIS @@ -0,0 +1,38 @@ + + %(name)s + + hvm + %(basepath)s/rescue-kernel + %(basepath)s/rescue-ramdisk + root=/dev/vda1 console=ttyS0 + + + + + %(memory_kb)s + %(vcpus)s + + + + + + + + + + + + + + + + + %(extra_params)s + + + + + + + + diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template deleted file mode 100644 index a254692d4..000000000 --- a/nova/virt/libvirt.rescue.uml.xml.template +++ /dev/null @@ -1,31 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - diff --git a/nova/virt/libvirt.rescue.uml.xml.template.THIS b/nova/virt/libvirt.rescue.uml.xml.template.THIS new file mode 100644 index 000000000..a254692d4 --- /dev/null +++ b/nova/virt/libvirt.rescue.uml.xml.template.THIS @@ -0,0 +1,31 @@ + + %(name)s + %(memory_kb)s + + %(type)s + /usr/bin/linux + /dev/ubda1 + + + + + + + + + + + + + + + + + %(extra_params)s + + + + + + + diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template deleted file mode 100644 index 3b8d27237..000000000 --- a/nova/virt/libvirt.rescue.xen.xml.template +++ /dev/null @@ -1,34 +0,0 @@ - - %(name)s - - linux - %(basepath)s/kernel - %(basepath)s/ramdisk - /dev/xvda1 - ro - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template deleted file mode 100644 index 506f2ef72..000000000 --- a/nova/virt/libvirt.uml.xml.template +++ /dev/null @@ -1,27 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - %(extra_params)s - - - - - - - diff --git a/nova/virt/libvirt.uml.xml.template.THIS b/nova/virt/libvirt.uml.xml.template.THIS new file mode 100644 index 000000000..506f2ef72 --- /dev/null +++ b/nova/virt/libvirt.uml.xml.template.THIS @@ -0,0 +1,27 @@ + + %(name)s + %(memory_kb)s + + %(type)s + /usr/bin/linux + /dev/ubda1 + + + + 
+ + + + + + + + + %(extra_params)s + + + + + + + diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template deleted file mode 100644 index 9677902c6..000000000 --- a/nova/virt/libvirt.xen.xml.template +++ /dev/null @@ -1,30 +0,0 @@ - - %(name)s - - linux - %(basepath)s/kernel - %(basepath)s/ramdisk - /dev/xvda1 - ro - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template new file mode 100644 index 000000000..3fb2243da --- /dev/null +++ b/nova/virt/libvirt.xml.template @@ -0,0 +1,79 @@ + + ${name} + ${memory_kb} + +#if $type == 'uml' + #set $disk_prefix = 'ubd' + #set $disk_bus = 'uml' + uml + /usr/bin/linux + /dev/ubda1 +#else + #if $type == 'xen' + #set $disk_prefix = 'sd' + #set $disk_bus = 'scsi' + linux + /dev/xvda1 + #else + #set $disk_prefix = 'vd' + #set $disk_bus = 'virtio' + hvm + #end if + #if $getVar('rescue', False) + ${basepath}/rescue-kernel + ${basepath}/rescue-ramdisk + #else + #if $getVar('kernel', None) + ${kernel} + #if $type == 'xen' + ro + #else + root=/dev/vda1 console=ttyS0 + #end if + #if $getVar('ramdisk', None) + ${ramdisk} + #end if + #else + + #end if + #end if +#end if + + + + + ${vcpus} + +#if $getVar('rescue', False) + + + + + + + + +#else + + + + +#end if + + + + + + + +#if $getVar('extra_params', False) + ${extra_params} +#end if + + + + + + + + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 0d42ce2f8..16bcfe3c0 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN. :libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm). :libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). -:libvirt_xml_template: Libvirt XML Template (QEmu/KVM). -:libvirt_xen_xml_template: Libvirt XML Template (Xen). -:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux). -:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU). -:libvirt_rescue_xen_xml_template: XML templage for rescue mode (XEN). -:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML). +:libvirt_xml_template: Libvirt XML Template. :rescue_image_id: Rescue ami image (default: ami-rescue). :rescue_kernel_id: Rescue aki image (default: aki-rescue). :rescue_ramdisk_id: Rescue ari image (default: ari-rescue). 
@@ -62,36 +57,20 @@ from nova.compute import instance_types from nova.compute import power_state from nova.virt import images +from Cheetah.Template import Template + libvirt = None libxml2 = None FLAGS = flags.FLAGS -flags.DEFINE_string('libvirt_rescue_xml_template', - utils.abspath('virt/libvirt.rescue.qemu.xml.template'), - 'Libvirt RESCUE XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_rescue_xen_xml_template', - utils.abspath('virt/libvirt.rescue.xen.xml.template'), - 'Libvirt RESCUE XML Template for xen') -flags.DEFINE_string('libvirt_rescue_uml_xml_template', - utils.abspath('virt/libvirt.rescue.uml.xml.template'), - 'Libvirt RESCUE XML Template for user-mode-linux') # TODO(vish): These flags should probably go into a shared location flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', - utils.abspath('virt/libvirt.qemu.xml.template'), - 'Libvirt XML Template for QEmu/KVM') -flags.DEFINE_string('libvirt_xen_xml_template', - utils.abspath('virt/libvirt.xen.xml.template'), - 'Libvirt XML Template for Xen') -flags.DEFINE_string('libvirt_uml_xml_template', - utils.abspath('virt/libvirt.uml.xml.template'), - 'Libvirt XML Template for user-mode-linux') -flags.DEFINE_string('injected_network_template', - utils.abspath('virt/interfaces.template'), - 'Template file for injected network') + utils.abspath('virt/libvirt.xml.template'), + 'Libvirt XML Template') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -123,13 +102,11 @@ def _get_net_and_mask(cidr): class LibvirtConnection(object): + def __init__(self, read_only): - (self.libvirt_uri, - template_file, - rescue_file) = self.get_uri_and_templates() + self.libvirt_uri = self.get_uri() - self.libvirt_xml = open(template_file).read() - self.rescue_xml = open(rescue_file).read() + self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -139,7 +116,7 @@ class LibvirtConnection(object): @property def _conn(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug('Connecting to libvirt: %s' % self.libvirt_uri) + logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn @@ -151,24 +128,18 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug('Connection to libvirt broke') + logging.debug(_('Connection to libvirt broke')) return False raise - def get_uri_and_templates(self): + def get_uri(self): if FLAGS.libvirt_type == 'uml': uri = FLAGS.libvirt_uri or 'uml:///system' - template_file = FLAGS.libvirt_uml_xml_template - rescue_file = FLAGS.libvirt_rescue_uml_xml_template elif FLAGS.libvirt_type == 'xen': uri = FLAGS.libvirt_uri or 'xen:///' - template_file = FLAGS.libvirt_xen_xml_template - rescue_file = FLAGS.libvirt_rescue_xen_xml_template else: uri = FLAGS.libvirt_uri or 'qemu:///system' - template_file = FLAGS.libvirt_xml_template - rescue_file = FLAGS.libvirt_rescue_xml_template - return uri, template_file, rescue_file + return uri def _connect(self, uri, read_only): auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], @@ -228,7 +199,7 @@ class 
LibvirtConnection(object): def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info('instance %s: deleting instance files %s', + logging.info(_('instance %s: deleting instance files %s'), instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -270,7 +241,7 @@ class LibvirtConnection(object): mount_device = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) if not xml: - raise exception.NotFound("No disk at %s" % mount_device) + raise exception.NotFound(_("No disk at %s") % mount_device) virt_dom.detachDevice(xml) @exception.wrap_exception @@ -286,10 +257,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rebooted', instance['name']) + logging.debug(_('instance %s: rebooted'), instance['name']) timer.stop() except Exception, exn: - logging.error('_wait_for_reboot failed: %s', exn) + logging.error(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -298,6 +269,14 @@ class LibvirtConnection(object): timer.f = _wait_for_reboot return timer.start(interval=0.5, now=True) + @exception.wrap_exception + def pause(self, instance, callback): + raise exception.APIError("pause not supported for libvirt.") + + @exception.wrap_exception + def unpause(self, instance, callback): + raise exception.APIError("unpause not supported for libvirt.") + @exception.wrap_exception def rescue(self, instance): self.destroy(instance, False) @@ -316,10 +295,10 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: rescued', instance['name']) + logging.debug(_('instance %s: rescued'), instance['name']) timer.stop() except Exception, exn: - logging.error('_wait_for_rescue failed: %s', exn) + logging.error(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -344,7 +323,7 @@ class LibvirtConnection(object): NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug("instance %s: is running", instance['name']) + logging.debug(_("instance %s: is running"), instance['name']) timer = utils.LoopingCall(f=None) @@ -354,10 +333,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug('instance %s: booted', instance['name']) + logging.debug(_('instance %s: booted'), instance['name']) timer.stop() except: - logging.exception('instance %s: failed to boot', + logging.exception(_('instance %s: failed to boot'), instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], @@ -372,7 +351,7 @@ class LibvirtConnection(object): virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info('cool, it\'s a device') + logging.info(_('cool, it\'s a device')) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) return out @@ -380,7 +359,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info('data: %r, fpath: %r' % (data, fpath)) + logging.info(_('data: %r, fpath: %r') % (data, fpath)) fp = open(fpath, 'a+') 
fp.write(data) return fpath @@ -422,7 +401,7 @@ class LibvirtConnection(object): # TODO(termie): these are blocking calls, it would be great # if they weren't. - logging.info('instance %s: Creating image', inst['name']) + logging.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() @@ -441,18 +420,28 @@ class LibvirtConnection(object): if not os.path.exists(basepath('disk')): images.fetch(inst.image_id, basepath('disk-raw'), user, project) - if not os.path.exists(basepath('kernel')): - images.fetch(inst.kernel_id, basepath('kernel'), user, - project) - if not os.path.exists(basepath('ramdisk')): - images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, - project) + + if inst['kernel_id']: + if not os.path.exists(basepath('kernel')): + images.fetch(inst['kernel_id'], basepath('kernel'), + user, project) + if inst['ramdisk_id']: + if not os.path.exists(basepath('ramdisk')): + images.fetch(inst['ramdisk_id'], basepath('ramdisk'), + user, project) def execute(cmd, process_input=None, check_exit_code=True): return utils.execute(cmd=cmd, process_input=process_input, check_exit_code=check_exit_code) + # For now, we assume that if we're not using a kernel, we're using a + # partitioned disk image where the target partition is the first + # partition + target_partition = None + if not inst['kernel_id']: + target_partition = "1" + key = str(inst['key_data']) net = None network_ref = db.network_get_by_instance(context.get_admin_context(), @@ -468,16 +457,24 @@ class LibvirtConnection(object): 'dns': network_ref['dns']} if key or net: if key: - logging.info('instance %s: injecting key into image %s', + logging.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info('instance %s: injecting net into image %s', - inst['name'], inst.image_id) - disk.inject_data(basepath('disk-raw'), key, net, - execute=execute) - - if os.path.exists(basepath('disk')): - utils.execute('rm -f %s' % basepath('disk')) + logging.info(_('instance %s: injecting net into image %s'), + inst['name'], inst.image_id) + try: + disk.inject_data(basepath('disk-raw'), key, net, + partition=target_partition, + execute=execute) + except Exception as e: + # This could be a windows image, or a vmdk format disk + logging.warn(_('instance %s: ignoring error injecting data' + ' into image %s (%s)'), + inst['name'], inst.image_id, e) + + if inst['kernel_id']: + if os.path.exists(basepath('disk')): + utils.execute('rm -f %s' % basepath('disk')) local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] ['local_gb'] @@ -486,15 +483,21 @@ class LibvirtConnection(object): resize = True if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': resize = False - disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) + + if inst['kernel_id']: + disk.partition(basepath('disk-raw'), basepath('disk'), + local_bytes, resize, execute=execute) + else: + os.rename(basepath('disk-raw'), basepath('disk')) + disk.extend(basepath('disk'), local_bytes, execute=execute) if FLAGS.libvirt_type == 'uml': utils.execute('sudo chown root %s' % basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? 
- logging.debug('instance %s: starting toXML method', instance['name']) + logging.debug(_('instance %s: starting toXML method'), + instance['name']) network = db.project_get_network(context.get_admin_context(), instance['project_id']) # FIXME(vish): stick this in db @@ -523,20 +526,29 @@ class LibvirtConnection(object): 'mac_address': instance['mac_address'], 'ip_address': ip_address, 'dhcp_server': dhcp_server, - 'extra_params': extra_params} - if rescue: - libvirt_xml = self.rescue_xml % xml_info - else: - libvirt_xml = self.libvirt_xml % xml_info - logging.debug('instance %s: finished toXML method', instance['name']) + 'extra_params': extra_params, + 'rescue': rescue} + if not rescue: + if instance['kernel_id']: + xml_info['kernel'] = xml_info['basepath'] + "/kernel" + + if instance['ramdisk_id']: + xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" + + xml_info['disk'] = xml_info['basepath'] + "/disk" - return libvirt_xml + xml = str(Template(self.libvirt_xml, searchList=[xml_info])) + logging.debug(_('instance %s: finished toXML method'), + instance['name']) + + return xml def get_info(self, instance_name): try: virt_dom = self._conn.lookupByName(instance_name) except: - raise exception.NotFound("Instance %s not found" % instance_name) + raise exception.NotFound(_("Instance %s not found") + % instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 012954394..ce2c68ce0 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -25,6 +25,7 @@ class NetworkHelper(): """ The class that wraps the helper methods together. """ + def __init__(self): return diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 2f5d78e75..badaaedc1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -47,6 +47,7 @@ class VMHelper(): """ The class that wraps the helper methods together. 
""" + def __init__(self): return @@ -228,11 +229,7 @@ class VMHelper(): try: host = session.get_xenapi_host() host_ip = session.get_xenapi().host.get_record(host)["address"] - metrics = session.get_xenapi().VM_guest_metrics.get_record( - record["guest_metrics"]) - diags = { - "Kernel": metrics["os_version"]["uname"], - "Distro": metrics["os_version"]["name"]} + diags = {} xml = get_rrd(host_ip, record["uuid"]) if xml: rrd = minidom.parseString(xml) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3034df9e1..3b00ce8bf 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -34,6 +34,7 @@ class VMOps(object): """ Management class for VM-related tasks """ + def __init__(self, session): global XenAPI if XenAPI is None: @@ -43,12 +44,16 @@ class VMOps(object): VMHelper.late_import() def list_instances(self): - """ List VM instances """ - return [self._session.get_xenapi().VM.get_name_label(vm) \ - for vm in self._session.get_xenapi().VM.get_all()] + """List VM instances""" + vms = [] + for vm in self._session.get_xenapi().VM.get_all(): + rec = self._session.get_xenapi().VM.get_record(vm) + if not rec["is_a_template"] and not rec["is_control_domain"]: + vms.append(rec["name_label"]) + return vms def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is not None: raise Exception('Attempted to create non-unique name %s' % @@ -80,16 +85,16 @@ class VMOps(object): vm_ref) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" instance_name = instance.name vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception('instance not present %s' % instance_name) task = self._session.call_xenapi('Async.VM.clean_reboot', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" vm = VMHelper.lookup(self._session, instance.name) if vm is None: # Don't complain, just return. 
This lets us clean up instances @@ -100,7 +105,7 @@ class VMOps(object): try: task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) # Disk clean-up @@ -108,17 +113,43 @@ class VMOps(object): for vdi in vdis: try: task = self._session.call_xenapi('Async.VDI.destroy', vdi) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) except XenAPI.Failure, exc: logging.warn(exc) try: task = self._session.call_xenapi('Async.VM.destroy', vm) - self._session.wait_for_task(task) + self._session.wait_for_task(instance.id, task) + except XenAPI.Failure, exc: + logging.warn(exc) + + def _wait_with_callback(self, instance_id, task, callback): + ret = None + try: + ret = self._session.wait_for_task(instance_id, task) except XenAPI.Failure, exc: logging.warn(exc) + callback(ret) + + def pause(self, instance, callback): + """Pause VM instance""" + instance_name = instance.name + vm = VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = self._session.call_xenapi('Async.VM.pause', vm) + self._wait_with_callback(instance.id, task, callback) + + def unpause(self, instance, callback): + """Unpause VM instance""" + instance_name = instance.name + vm = VMHelper.lookup(self._session, instance_name) + if vm is None: + raise Exception('instance not present %s' % instance_name) + task = self._session.call_xenapi('Async.VM.unpause', vm) + self._wait_with_callback(instance.id, task, callback) def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" vm = VMHelper.lookup_blocking(self._session, instance_id) if vm is None: raise Exception('instance not present %s' % instance_id) @@ -134,6 +165,6 @@ class VMOps(object): return VMHelper.compile_diagnostics(self._session, rec) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" # TODO: implement this to fix pylint! return 'FAKE CONSOLE OUTPUT of instance' diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index a4c7a3861..1943ccab0 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -20,6 +20,7 @@ Management class for Storage-related functions (attach, detach, etc). 
class VolumeOps(object): + def __init__(self, session): self._session = session diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 6beb08f5e..146e2f153 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -54,6 +54,8 @@ import xmlrpclib from eventlet import event from eventlet import tpool +from nova import context +from nova import db from nova import utils from nova import flags from nova.virt.xenapi.vmops import VMOps @@ -93,38 +95,47 @@ def get_connection(_): username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password if not url or password is None: - raise Exception('Must specify xenapi_connection_url, ' - 'xenapi_connection_username (optionally), and ' - 'xenapi_connection_password to use ' - 'connection_type=xenapi') + raise Exception(_('Must specify xenapi_connection_url, ' + 'xenapi_connection_username (optionally), and ' + 'xenapi_connection_password to use ' + 'connection_type=xenapi')) return XenAPIConnection(url, username, password) class XenAPIConnection(object): - """ A connection to XenServer or Xen Cloud Platform """ + """A connection to XenServer or Xen Cloud Platform""" + def __init__(self, url, user, pw): session = XenAPISession(url, user, pw) self._vmops = VMOps(session) self._volumeops = VolumeOps(session) def list_instances(self): - """ List VM instances """ + """List VM instances""" return self._vmops.list_instances() def spawn(self, instance): - """ Create VM instance """ + """Create VM instance""" self._vmops.spawn(instance) def reboot(self, instance): - """ Reboot VM instance """ + """Reboot VM instance""" self._vmops.reboot(instance) def destroy(self, instance): - """ Destroy VM instance """ + """Destroy VM instance""" self._vmops.destroy(instance) + def pause(self, instance, callback): + """Pause VM instance""" + self._vmops.pause(instance, callback) + + def unpause(self, instance, callback): + """Unpause paused VM instance""" + self._vmops.unpause(instance, callback) + def get_info(self, instance_id): - """ Return data about VM instance """ + """Return data about VM instance""" return self._vmops.get_info(instance_id) def get_diagnostics(self, instance_id): @@ -132,32 +143,33 @@ class XenAPIConnection(object): return self._vmops.get_diagnostics(instance_id) def get_console_output(self, instance): - """ Return snapshot of console """ + """Return snapshot of console""" return self._vmops.get_console_output(instance) def attach_volume(self, instance_name, device_path, mountpoint): - """ Attach volume storage to VM instance """ + """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, device_path, mountpoint) def detach_volume(self, instance_name, mountpoint): - """ Detach volume storage to VM instance """ + """Detach volume storage to VM instance""" return self._volumeops.detach_volume(instance_name, mountpoint) class XenAPISession(object): - """ The session to invoke XenAPI SDK calls """ + """The session to invoke XenAPI SDK calls""" + def __init__(self, url, user, pw): self._session = XenAPI.Session(url) self._session.login_with_password(user, pw) def get_xenapi(self): - """ Return the xenapi object """ + """Return the xenapi object""" return self._session.xenapi def get_xenapi_host(self): - """ Return the xenapi host """ + """Return the xenapi host""" return self._session.xenapi.session.get_this_host(self._session.handle) def call_xenapi(self, method, *args): @@ -173,46 +185,57 @@ class XenAPISession(object): self._session.xenapi.Async.host.call_plugin, 
self.get_xenapi_host(), plugin, fn, args) - def wait_for_task(self, task): + def wait_for_task(self, instance_id, task): """Return a Deferred that will give the result of the given task. The task is polled until it completes.""" done = event.Event() - loop = utils.LoopingCall(self._poll_task, task, done) + loop = utils.LoopingCall(self._poll_task, instance_id, task, done) loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() loop.stop() return rv - def _poll_task(self, task, done): + def _poll_task(self, instance_id, task, done): """Poll the given XenAPI task, and fire the given Deferred if we get a result.""" try: - #logging.debug('Polling task %s...', task) + name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) - if status == 'pending': + action = dict( + instance_id=int(instance_id), + action=name, + error=None) + if status == "pending": return - elif status == 'success': + elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info('Task %s status: success. %s', task, result) + logging.info(_("Task [%s] %s status: success %s") % ( + name, + task, + result)) done.send(_parse_xmlrpc_value(result)) else: error_info = self._session.xenapi.task.get_error_info(task) - logging.warn('Task %s status: %s. %s', task, status, - error_info) + action["error"] = str(error_info) + logging.warn(_("Task [%s] %s status: %s %s") % ( + name, + task, + status, + error_info)) done.send_exception(XenAPI.Failure(error_info)) - #logging.debug('Polling task %s done.', task) + db.instance_action_create(context.get_admin_context(), action) except XenAPI.Failure, exc: logging.warn(exc) done.send_exception(*sys.exc_info()) def _unwrap_plugin_exceptions(func, *args, **kwargs): - """ Parse exception details """ + """Parse exception details""" try: return func(*args, **kwargs) except XenAPI.Failure, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -225,7 +248,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug("Got exception: %s", exc) + logging.debug(_("Got exception: %s"), exc) raise -- cgit From 1509d51c7e9be04f8ca7aa9fb50d7b06b20a4e71 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 21:41:40 +0000 Subject: remove extra files that slipped in --- nova/virt/libvirt.uml.xml.template.THIS | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100644 nova/virt/libvirt.uml.xml.template.THIS (limited to 'nova/virt') diff --git a/nova/virt/libvirt.uml.xml.template.THIS b/nova/virt/libvirt.uml.xml.template.THIS deleted file mode 100644 index 506f2ef72..000000000 --- a/nova/virt/libvirt.uml.xml.template.THIS +++ /dev/null @@ -1,27 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - %(extra_params)s - - - - - - - -- cgit From a7e5a4a39b93b32974ca82b77391368c4f01cdd8 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 22 Dec 2010 22:54:43 +0000 Subject: removed extra files --- nova/virt/libvirt.rescue.qemu.xml.template.THIS | 38 ------------------------- nova/virt/libvirt.rescue.uml.xml.template.THIS | 31 -------------------- 2 files changed, 69 deletions(-) delete mode 100644 nova/virt/libvirt.rescue.qemu.xml.template.THIS delete mode 100644 nova/virt/libvirt.rescue.uml.xml.template.THIS (limited 
to 'nova/virt') diff --git a/nova/virt/libvirt.rescue.qemu.xml.template.THIS b/nova/virt/libvirt.rescue.qemu.xml.template.THIS deleted file mode 100644 index a3b88106c..000000000 --- a/nova/virt/libvirt.rescue.qemu.xml.template.THIS +++ /dev/null @@ -1,38 +0,0 @@ - - %(name)s - - hvm - %(basepath)s/rescue-kernel - %(basepath)s/rescue-ramdisk - root=/dev/vda1 console=ttyS0 - - - - - %(memory_kb)s - %(vcpus)s - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - - diff --git a/nova/virt/libvirt.rescue.uml.xml.template.THIS b/nova/virt/libvirt.rescue.uml.xml.template.THIS deleted file mode 100644 index a254692d4..000000000 --- a/nova/virt/libvirt.rescue.uml.xml.template.THIS +++ /dev/null @@ -1,31 +0,0 @@ - - %(name)s - %(memory_kb)s - - %(type)s - /usr/bin/linux - /dev/ubda1 - - - - - - - - - - - - - - - - - %(extra_params)s - - - - - - - -- cgit
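The template consolidation in the Cheetah merge above replaces the old %-interpolated per-hypervisor XML files with a single libvirt.xml.template rendered through str(Template(self.libvirt_xml, searchList=[xml_info])). A small self-contained sketch of that rendering step follows; the two-element template and xml_info dict are invented for illustration and are not the real libvirt.xml.template, but the Cheetah constructs it exercises ($placeholders, #if/#end if, $getVar() with a default) are the same ones the new template uses:

from Cheetah.Template import Template

# Toy template: $type/$name come from the searchList dict, and $getVar()
# returns the given default when 'kernel' is absent, mirroring how the
# new template only emits a kernel line when one is supplied.
LIBVIRT_XML = """<domain type='$type'>
  <name>$name</name>
#if $getVar('kernel', None)
  <kernel>$kernel</kernel>
#end if
</domain>
"""

xml_info = {'type': 'kvm', 'name': 'instance-00000001'}  # no 'kernel' key
print(str(Template(LIBVIRT_XML, searchList=[xml_info])))

With the dict above this prints a <domain> element without a <kernel> child; adding 'kernel': '/path/to/kernel' to xml_info switches the #if branch on, which is the behaviour to_xml() relies on when an instance has no kernel_id.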