| field | value |
|---|---|
| author | Ryan Lane <laner@controller>, 2010-12-22 23:47:31 +0000 |
| committer | Ryan Lane <laner@controller>, 2010-12-22 23:47:31 +0000 |
| commit | 5012ccb22724c2f7fb0fcdcb7b146d5d5e61337d (patch) |
| tree | 3d44d084647c86c41cc189d3a8bf8bcdedba3ef7 /nova/virt |
| parent | e893be0a8d32cf1eb2c91187b81a6febf90e5b7c (diff) |
| parent | 3f37287c1adfe35756c58938ea8d826181bad2e2 (diff) |
Merge from trunk, and resolve conflict with nova/auth/ldapdriver.py
Diffstat (limited to 'nova/virt')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/virt/connection.py | 2 |
| -rw-r--r-- | nova/virt/fake.py | 32 |
| -rw-r--r-- | nova/virt/images.py | 11 |
| -rw-r--r-- | nova/virt/libvirt.qemu.xml.template | 33 |
| -rw-r--r-- | nova/virt/libvirt.rescue.qemu.xml.template.THIS (renamed from nova/virt/libvirt.rescue.qemu.xml.template) | 1 |
| -rw-r--r-- | nova/virt/libvirt.rescue.uml.xml.template | 26 |
| -rw-r--r-- | nova/virt/libvirt.rescue.uml.xml.template.THIS (renamed from nova/virt/libvirt.uml.xml.template) | 7 |
| -rw-r--r-- | nova/virt/libvirt.rescue.xen.xml.template | 34 |
| -rw-r--r-- | nova/virt/libvirt.xen.xml.template | 30 |
| -rw-r--r-- | nova/virt/libvirt.xml.template | 79 |
| -rw-r--r-- | nova/virt/libvirt_conn.py | 399 |
| -rw-r--r-- | nova/virt/xenapi/network_utils.py | 11 |
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 75 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 122 |
| -rw-r--r-- | nova/virt/xenapi/volumeops.py | 1 |
| -rw-r--r-- | nova/virt/xenapi_conn.py | 124 |
16 files changed, 541 insertions, 446 deletions
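The dominant theme of the merged trunk changes below is the removal of Twisted (`defer.inlineCallbacks`, `task.LoopingCall`, `threads.deferToThread`) in favor of eventlet green threads. A minimal sketch of the replacement pattern, using eventlet's real `event.Event` and `eventlet.spawn` APIs; the `wait_for` and `poll_state` names are invented for illustration, not part of nova:

```python
# Minimal sketch of the Deferred -> green-thread pattern this merge adopts.
# eventlet.spawn/sleep and event.Event are real eventlet APIs; wait_for and
# poll_state are hypothetical stand-ins for the nova helpers in the diff.
import eventlet
from eventlet import event


def wait_for(poll_state, interval=0.5):
    """Block the calling green thread until poll_state() returns True."""
    done = event.Event()

    def _poll():
        while not poll_state():
            eventlet.sleep(interval)   # yields to other green threads
        done.send(None)                # wakes the waiter, like callback(None)

    eventlet.spawn(_poll)              # replaces task.LoopingCall + Deferred
    return done.wait()                 # replaces `yield d` in inlineCallbacks
```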
```diff
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index c40bb4bb4..61e99944e 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -66,6 +66,6 @@ def get_connection(read_only=False):
         raise Exception('Unknown connection type "%s"' % t)
 
     if conn is None:
-        logging.error('Failed to open connection to the hypervisor')
+        logging.error(_('Failed to open connection to the hypervisor'))
         sys.exit(1)
     return conn
```

```diff
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f855523d3..238acf798 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -25,8 +25,6 @@ semantics of real hypervisor connections.
 
 """
 
-from twisted.internet import defer
-
 from nova import exception
 from nova.compute import power_state
 
@@ -78,6 +76,12 @@ class FakeConnection(object):
             cls._instance = cls()
         return cls._instance
 
+    def init_host(self):
+        """
+        Initialize anything that is necessary for the driver to function
+        """
+        return
+
     def list_instances(self):
         """
         Return the names of all the instances known to the virtualization
@@ -107,7 +111,6 @@ class FakeConnection(object):
         fake_instance = FakeInstance()
         self.instances[instance.name] = fake_instance
         fake_instance._state = power_state.RUNNING
-        return defer.succeed(None)
 
     def reboot(self, instance):
         """
@@ -119,19 +122,31 @@ class FakeConnection(object):
         The work will be done asynchronously.  This function returns a
         Deferred that allows the caller to detect when it is complete.
         """
-        return defer.succeed(None)
+        pass
 
     def rescue(self, instance):
         """
         Rescue the specified instance.
         """
-        return defer.succeed(None)
+        pass
 
     def unrescue(self, instance):
         """
         Unrescue the specified instance.
         """
-        return defer.succeed(None)
+        pass
+
+    def pause(self, instance, callback):
+        """
+        Pause the specified instance.
+        """
+        pass
+
+    def unpause(self, instance, callback):
+        """
+        Unpause the specified instance.
+        """
+        pass
 
     def destroy(self, instance):
         """
@@ -144,7 +159,6 @@ class FakeConnection(object):
         Deferred that allows the caller to detect when it is complete.
         """
         del self.instances[instance.name]
-        return defer.succeed(None)
 
     def attach_volume(self, instance_name, device_path, mountpoint):
         """Attach the disk at device_path to the instance at mountpoint"""
@@ -167,7 +181,8 @@ class FakeConnection(object):
         knowledge of the instance
         """
         if instance_name not in self.instances:
-            raise exception.NotFound("Instance %s Not Found" % instance_name)
+            raise exception.NotFound(_("Instance %s Not Found")
+                                     % instance_name)
         i = self.instances[instance_name]
         return {'state': i._state,
                 'max_mem': 0,
@@ -247,5 +262,6 @@ class FakeConnection(object):
 
 
 class FakeInstance(object):
+
     def __init__(self):
         self._state = power_state.NOSTATE
```

```diff
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 981aa5cf3..1c9b2e093 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -26,7 +26,7 @@ import time
 import urlparse
 
 from nova import flags
-from nova import process
+from nova import utils
 from nova.auth import manager
 from nova.auth import signer
 from nova.objectstore import image
@@ -50,7 +50,7 @@ def _fetch_s3_image(image, path, user, project):
 
     # This should probably move somewhere else, like e.g. a download_as
    # method on User objects and at the same time get rewritten to use
-    # twisted web client.
+    # a web client.
     headers = {}
     headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                     time.gmtime())
@@ -63,15 +63,16 @@ def _fetch_s3_image(image, path, user, project):
     cmd = ['/usr/bin/curl', '--fail', '--silent', url]
     for (k, v) in headers.iteritems():
-        cmd += ['-H', '%s: %s' % (k, v)]
+        cmd += ['-H', '"%s: %s"' % (k, v)]
     cmd += ['-o', path]
-    return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
+    cmd_out = ' '.join(cmd)
+    return utils.execute(cmd_out)
 
 
 def _fetch_local_image(image, path, user, project):
     source = _image_path('%s/image' % image)
-    return process.simple_execute('cp %s %s' % (source, path))
+    return utils.execute('cp %s %s' % (source, path))
 
 
 def _image_path(path):
```
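images.py now shells out through `nova.utils.execute` instead of the Twisted process pool, and the curl argument list is joined into a single string, which is why the header values grow shell-style double quotes. A rough sketch of what a string-based execute wrapper of that shape looks like; this is an assumption about the era's `utils.execute`, not a copy of it:

```python
# Rough sketch of a string-based execute() in the style this diff calls into,
# assuming shlex-style splitting; nova's real utils.execute may differ.
import shlex
import subprocess


def execute(cmd, process_input=None, check_exit_code=True):
    proc = subprocess.Popen(shlex.split(cmd),
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate(process_input)
    if check_exit_code and proc.returncode != 0:
        raise RuntimeError('Command failed: %s\n%s' % (cmd, err))
    return out, err
```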
```diff
diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template
deleted file mode 100644
index 2538b1ade..000000000
--- a/nova/virt/libvirt.qemu.xml.template
+++ /dev/null
@@ -1,33 +0,0 @@
-<domain type='%(type)s'>
-    <name>%(name)s</name>
-    <os>
-        <type>hvm</type>
-        <kernel>%(basepath)s/kernel</kernel>
-        <initrd>%(basepath)s/ramdisk</initrd>
-        <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
-    </os>
-    <features>
-        <acpi/>
-    </features>
-    <memory>%(memory_kb)s</memory>
-    <vcpu>%(vcpus)s</vcpu>
-    <devices>
-        <disk type='file'>
-            <source file='%(basepath)s/disk'/>
-            <target dev='vda' bus='virtio'/>
-        </disk>
-        <interface type='bridge'>
-            <source bridge='%(bridge_name)s'/>
-            <mac address='%(mac_address)s'/>
-            <!--   <model type='virtio'/>  CANT RUN virtio network right now -->
-            <filterref filter="nova-instance-%(name)s">
-                <parameter name="IP" value="%(ip_address)s" />
-                <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
-            </filterref>
-        </interface>
-        <serial type="file">
-            <source path='%(basepath)s/console.log'/>
-            <target port='1'/>
-        </serial>
-    </devices>
-</domain>
```

```diff
diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template.THIS
index c0ffbdcee..a3b88106c 100644
--- a/nova/virt/libvirt.rescue.qemu.xml.template
+++ b/nova/virt/libvirt.rescue.qemu.xml.template.THIS
@@ -27,6 +27,7 @@
             <filterref filter="nova-instance-%(name)s">
                 <parameter name="IP" value="%(ip_address)s" />
                 <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+                %(extra_params)s
             </filterref>
         </interface>
         <serial type="file">
```

```diff
diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template
deleted file mode 100644
index 836f47532..000000000
--- a/nova/virt/libvirt.rescue.uml.xml.template
+++ /dev/null
@@ -1,26 +0,0 @@
-<domain type='%(type)s'>
-    <name>%(name)s</name>
-    <memory>%(memory_kb)s</memory>
-    <os>
-        <type>%(type)s</type>
-        <kernel>/usr/bin/linux</kernel>
-        <root>/dev/ubda1</root>
-    </os>
-    <devices>
-        <disk type='file'>
-            <source file='%(basepath)s/rescue-disk'/>
-            <target dev='ubd0' bus='uml'/>
-        </disk>
-        <disk type='file'>
-            <source file='%(basepath)s/disk'/>
-            <target dev='ubd1' bus='uml'/>
-        </disk>
-        <interface type='bridge'>
-            <source bridge='%(bridge_name)s'/>
-            <mac address='%(mac_address)s'/>
-        </interface>
-        <console type="file">
-            <source path='%(basepath)s/console.log'/>
-        </console>
-    </devices>
-</domain>
```

```diff
diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template.THIS
index bb8b47911..a254692d4 100644
--- a/nova/virt/libvirt.uml.xml.template
+++ b/nova/virt/libvirt.rescue.uml.xml.template.THIS
@@ -8,15 +8,20 @@
     </os>
     <devices>
         <disk type='file'>
-            <source file='%(basepath)s/disk'/>
+            <source file='%(basepath)s/rescue-disk'/>
             <target dev='ubd0' bus='uml'/>
         </disk>
+        <disk type='file'>
+            <source file='%(basepath)s/disk'/>
+            <target dev='ubd1' bus='uml'/>
+        </disk>
         <interface type='bridge'>
             <source bridge='%(bridge_name)s'/>
             <mac address='%(mac_address)s'/>
             <filterref filter="nova-instance-%(name)s">
                 <parameter name="IP" value="%(ip_address)s" />
                 <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+                %(extra_params)s
             </filterref>
         </interface>
         <console type="file">
```

```diff
diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template
deleted file mode 100644
index 3b8d27237..000000000
--- a/nova/virt/libvirt.rescue.xen.xml.template
+++ /dev/null
@@ -1,34 +0,0 @@
-<domain type='%(type)s'>
-    <name>%(name)s</name>
-    <os>
-        <type>linux</type>
-        <kernel>%(basepath)s/kernel</kernel>
-        <initrd>%(basepath)s/ramdisk</initrd>
-        <root>/dev/xvda1</root>
-        <cmdline>ro</cmdline>
-    </os>
-    <features>
-        <acpi/>
-    </features>
-    <memory>%(memory_kb)s</memory>
-    <vcpu>%(vcpus)s</vcpu>
-    <devices>
-        <disk type='file'>
-            <source file='%(basepath)s/rescue-disk'/>
-            <target dev='sda' />
-        </disk>
-        <disk type='file'>
-            <source file='%(basepath)s/disk'/>
-            <target dev='sdb' />
-        </disk>
-        <interface type='bridge'>
-            <source bridge='%(bridge_name)s'/>
-            <mac address='%(mac_address)s'/>
-        </interface>
-        <console type="file">
-            <source path='%(basepath)s/console.log'/>
-            <target port='1'/>
-        </console>
-    </devices>
-</domain>
-
```

```diff
diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template
deleted file mode 100644
index 9677902c6..000000000
--- a/nova/virt/libvirt.xen.xml.template
+++ /dev/null
@@ -1,30 +0,0 @@
-<domain type='%(type)s'>
-    <name>%(name)s</name>
-    <os>
-        <type>linux</type>
-        <kernel>%(basepath)s/kernel</kernel>
-        <initrd>%(basepath)s/ramdisk</initrd>
-        <root>/dev/xvda1</root>
-        <cmdline>ro</cmdline>
-    </os>
-    <features>
-        <acpi/>
-    </features>
-    <memory>%(memory_kb)s</memory>
-    <vcpu>%(vcpus)s</vcpu>
-    <devices>
-        <disk type='file'>
-            <source file='%(basepath)s/disk'/>
-            <target dev='sda' />
-        </disk>
-        <interface type='bridge'>
-            <source bridge='%(bridge_name)s'/>
-            <mac address='%(mac_address)s'/>
-        </interface>
-        <console type="file">
-            <source path='%(basepath)s/console.log'/>
-            <target port='1'/>
-        </console>
-    </devices>
-</domain>
-
```
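The six per-hypervisor templates above collapse into the single Cheetah template that follows, with `#if`/`#set` directives choosing the disk prefix and bus per `$type` (the `.THIS` copies in the diffstat are leftover merge-conflict artifacts). For reference, Cheetah substitutes `${var}` values from a `searchList` and evaluates the directives at render time; a toy example, not the nova template:

```python
# Toy demonstration of Cheetah rendering with a searchList, the same
# mechanism the new to_xml() uses; the template text here is made up.
from Cheetah.Template import Template

source = """<domain type='${type}'>
#if $type == 'uml'
  <kernel>/usr/bin/linux</kernel>
#else
  <kernel>${basepath}/kernel</kernel>
#end if
</domain>"""

xml = str(Template(source, searchList=[{'type': 'uml',
                                        'basepath': '/var/lib/nova'}]))
print xml  # Python 2, matching the codebase
```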
```diff
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
new file mode 100644
index 000000000..3fb2243da
--- /dev/null
+++ b/nova/virt/libvirt.xml.template
@@ -0,0 +1,79 @@
+<domain type='${type}'>
+    <name>${name}</name>
+    <memory>${memory_kb}</memory>
+    <os>
+#if $type == 'uml'
+    #set $disk_prefix = 'ubd'
+    #set $disk_bus = 'uml'
+        <type>uml</type>
+        <kernel>/usr/bin/linux</kernel>
+        <root>/dev/ubda1</root>
+#else
+    #if $type == 'xen'
+        #set $disk_prefix = 'sd'
+        #set $disk_bus = 'scsi'
+        <type>linux</type>
+        <root>/dev/xvda1</root>
+    #else
+        #set $disk_prefix = 'vd'
+        #set $disk_bus = 'virtio'
+        <type>hvm</type>
+    #end if
+    #if $getVar('rescue', False)
+        <kernel>${basepath}/rescue-kernel</kernel>
+        <initrd>${basepath}/rescue-ramdisk</initrd>
+    #else
+        #if $getVar('kernel', None)
+            <kernel>${kernel}</kernel>
+            #if $type == 'xen'
+                <cmdline>ro</cmdline>
+            #else
+                <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
+            #end if
+            #if $getVar('ramdisk', None)
+                <initrd>${ramdisk}</initrd>
+            #end if
+        #else
+            <boot dev="hd" />
+        #end if
+    #end if
+#end if
+    </os>
+    <features>
+        <acpi/>
+    </features>
+    <vcpu>${vcpus}</vcpu>
+    <devices>
+#if $getVar('rescue', False)
+        <disk type='file'>
+            <source file='${basepath}/rescue-disk'/>
+            <target dev='${disk_prefix}a' bus='${disk_bus}'/>
+        </disk>
+        <disk type='file'>
+            <source file='${basepath}/disk'/>
+            <target dev='${disk_prefix}b' bus='${disk_bus}'/>
+        </disk>
+#else
+        <disk type='file'>
+            <source file='${basepath}/disk'/>
+            <target dev='${disk_prefix}a' bus='${disk_bus}'/>
+        </disk>
+#end if
+        <interface type='bridge'>
+            <source bridge='${bridge_name}'/>
+            <mac address='${mac_address}'/>
+            <!--   <model type='virtio'/>  CANT RUN virtio network right now -->
+            <filterref filter="nova-instance-${name}">
+                <parameter name="IP" value="${ip_address}" />
+                <parameter name="DHCPSERVER" value="${dhcp_server}" />
+#if $getVar('extra_params', False)
+                ${extra_params}
+#end if
+            </filterref>
+        </interface>
+        <serial type="file">
+            <source path='${basepath}/console.log'/>
+            <target port='1'/>
+        </serial>
+    </devices>
+</domain>
```
```diff
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 18085089f..8d3a6a261 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN.
 :libvirt_type:  Libvirt domain type.  Can be kvm, qemu, uml, xen
                 (default: kvm).
 :libvirt_uri:  Override for the default libvirt URI (depends on libvirt_type).
-:libvirt_xml_template:  Libvirt XML Template (QEmu/KVM).
-:libvirt_xen_xml_template:  Libvirt XML Template (Xen).
-:libvirt_uml_xml_template:  Libvirt XML Template (User Mode Linux).
-:libvirt_rescue_xml_template:  XML template for rescue mode (KVM & QEMU).
-:libvirt_rescue_xen_xml_template:  XML templage for rescue mode (XEN).
-:libvirt_rescue_uml_xml_template:  XML template for rescue mode (UML).
+:libvirt_xml_template:  Libvirt XML Template.
 :rescue_image_id:  Rescue ami image (default: ami-rescue).
 :rescue_kernel_id:  Rescue aki image (default: aki-rescue).
 :rescue_ramdisk_id:  Rescue ari image (default: ari-rescue).
@@ -45,16 +40,16 @@
 import logging
 import os
 import shutil
 
+from eventlet import greenthread
+from eventlet import event
+from eventlet import tpool
+
 import IPy
-from twisted.internet import defer
-from twisted.internet import task
-from twisted.internet import threads
 
 from nova import context
 from nova import db
 from nova import exception
 from nova import flags
-from nova import process
 from nova import utils
 #from nova.api import context
 from nova.auth import manager
@@ -63,36 +58,20 @@ from nova.compute import instance_types
 from nova.compute import power_state
 from nova.virt import images
 
+from Cheetah.Template import Template
+
+
 libvirt = None
 libxml2 = None
 
 
 FLAGS = flags.FLAGS
-flags.DEFINE_string('libvirt_rescue_xml_template',
-                    utils.abspath('virt/libvirt.rescue.qemu.xml.template'),
-                    'Libvirt RESCUE XML Template for QEmu/KVM')
-flags.DEFINE_string('libvirt_rescue_xen_xml_template',
-                    utils.abspath('virt/libvirt.rescue.xen.xml.template'),
-                    'Libvirt RESCUE XML Template for xen')
-flags.DEFINE_string('libvirt_rescue_uml_xml_template',
-                    utils.abspath('virt/libvirt.rescue.uml.xml.template'),
-                    'Libvirt RESCUE XML Template for user-mode-linux')
 # TODO(vish): These flags should probably go into a shared location
 flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
 flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
 flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
 flags.DEFINE_string('libvirt_xml_template',
-                    utils.abspath('virt/libvirt.qemu.xml.template'),
-                    'Libvirt XML Template for QEmu/KVM')
-flags.DEFINE_string('libvirt_xen_xml_template',
-                    utils.abspath('virt/libvirt.xen.xml.template'),
-                    'Libvirt XML Template for Xen')
-flags.DEFINE_string('libvirt_uml_xml_template',
-                    utils.abspath('virt/libvirt.uml.xml.template'),
-                    'Libvirt XML Template for user-mode-linux')
-flags.DEFINE_string('injected_network_template',
-                    utils.abspath('virt/interfaces.template'),
-                    'Template file for injected network')
+                    utils.abspath('virt/libvirt.xml.template'),
+                    'Libvirt XML Template')
 flags.DEFINE_string('libvirt_type',
                     'kvm',
                     'Libvirt domain type (valid options are: '
@@ -118,21 +97,27 @@ def get_connection(read_only):
     return LibvirtConnection(read_only)
 
 
+def _get_net_and_mask(cidr):
+    net = IPy.IP(cidr)
+    return str(net.net()), str(net.netmask())
+
+
 class LibvirtConnection(object):
+
     def __init__(self, read_only):
-        (self.libvirt_uri,
-         template_file,
-         rescue_file) = self.get_uri_and_templates()
+        self.libvirt_uri = self.get_uri()
 
-        self.libvirt_xml = open(template_file).read()
-        self.rescue_xml = open(rescue_file).read()
+        self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
         self._wrapped_conn = None
         self.read_only = read_only
 
+    def init_host(self):
+        NWFilterFirewall(self._conn).setup_base_nwfilters()
+
     @property
     def _conn(self):
         if not self._wrapped_conn or not self._test_connection():
-            logging.debug('Connecting to libvirt: %s' % self.libvirt_uri)
+            logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri)
             self._wrapped_conn = self._connect(self.libvirt_uri,
                                                self.read_only)
         return self._wrapped_conn
@@ -144,24 +129,18 @@ class LibvirtConnection(object):
         except libvirt.libvirtError as e:
             if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
                e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
-                logging.debug('Connection to libvirt broke')
+                logging.debug(_('Connection to libvirt broke'))
                 return False
             raise
 
-    def get_uri_and_templates(self):
+    def get_uri(self):
         if FLAGS.libvirt_type == 'uml':
             uri = FLAGS.libvirt_uri or 'uml:///system'
-            template_file = FLAGS.libvirt_uml_xml_template
-            rescue_file = FLAGS.libvirt_rescue_uml_xml_template
         elif FLAGS.libvirt_type == 'xen':
             uri = FLAGS.libvirt_uri or 'xen:///'
-            template_file = FLAGS.libvirt_xen_xml_template
-            rescue_file = FLAGS.libvirt_rescue_xen_xml_template
         else:
             uri = FLAGS.libvirt_uri or 'qemu:///system'
-            template_file = FLAGS.libvirt_xml_template
-            rescue_file = FLAGS.libvirt_rescue_xml_template
-        return uri, template_file, rescue_file
+        return uri
 
     def _connect(self, uri, read_only):
         auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
@@ -184,14 +163,12 @@ class LibvirtConnection(object):
         except Exception as _err:
             pass
             # If the instance is already terminated, we're still happy
-        d = defer.Deferred()
-        if cleanup:
-            d.addCallback(lambda _: self._cleanup(instance))
-        # FIXME: What does this comment mean?
-        # TODO(termie): short-circuit me for tests
-        # WE'LL save this for when we do shutdown,
+
+        done = event.Event()
+
+        # We'll save this for when we do shutdown,
         # instead of destroy - but destroy returns immediately
-        timer = task.LoopingCall(f=None)
+        timer = utils.LoopingCall(f=None)
 
         def _wait_for_shutdown():
             try:
@@ -200,26 +177,34 @@ class LibvirtConnection(object):
                 state = self.get_info(instance['name'])['state']
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'], state)
                 if state == power_state.SHUTDOWN:
                     timer.stop()
-                    d.callback(None)
             except Exception:
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'],
                                       power_state.SHUTDOWN)
                 timer.stop()
-                d.callback(None)
 
         timer.f = _wait_for_shutdown
-        timer.start(interval=0.5, now=True)
-        return d
+        timer_done = timer.start(interval=0.5, now=True)
+
+        # NOTE(termie): this is strictly superfluous (we could put the
+        #               cleanup code in the timer), but this emulates the
+        #               previous model so I am keeping it around until
+        #               everything has been vetted a bit
+        def _wait_for_timer():
+            timer_done.wait()
+            self._cleanup(instance)
+            done.send()
+
+        greenthread.spawn(_wait_for_timer)
+        return done
 
     def _cleanup(self, instance):
         target = os.path.join(FLAGS.instances_path, instance['name'])
-        logging.info('instance %s: deleting instance files %s',
+        logging.info(_('instance %s: deleting instance files %s'),
                      instance['name'], target)
         if os.path.exists(target):
             shutil.rmtree(target)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def attach_volume(self, instance_name, device_path, mountpoint):
         virt_dom = self._conn.lookupByName(instance_name)
@@ -230,7 +215,6 @@ class LibvirtConnection(object):
                         <target dev='%s' bus='virtio'/>
                     </disk>""" % (device_path, mount_device)
         virt_dom.attachDevice(xml)
-        yield
 
     def _get_disk_xml(self, xml, device):
         """Returns the xml for the disk mounted at device"""
@@ -252,26 +236,21 @@ class LibvirtConnection(object):
             if doc != None:
                 doc.freeDoc()
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def detach_volume(self, instance_name, mountpoint):
         virt_dom = self._conn.lookupByName(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
         xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
         if not xml:
-            raise exception.NotFound("No disk at %s" % mount_device)
+            raise exception.NotFound(_("No disk at %s") % mount_device)
         virt_dom.detachDevice(xml)
-        yield
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def reboot(self, instance):
-        yield self.destroy(instance, False)
+        self.destroy(instance, False)
         xml = self.to_xml(instance)
-        yield self._conn.createXML(xml, 0)
-
-        d = defer.Deferred()
-        timer = task.LoopingCall(f=None)
+        self._conn.createXML(xml, 0)
+        timer = utils.LoopingCall(f=None)
 
         def _wait_for_reboot():
             try:
@@ -279,64 +258,62 @@ class LibvirtConnection(object):
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'], state)
                 if state == power_state.RUNNING:
-                    logging.debug('instance %s: rebooted', instance['name'])
+                    logging.debug(_('instance %s: rebooted'), instance['name'])
                     timer.stop()
-                    d.callback(None)
             except Exception, exn:
-                logging.error('_wait_for_reboot failed: %s', exn)
+                logging.error(_('_wait_for_reboot failed: %s'), exn)
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'],
                                       power_state.SHUTDOWN)
                 timer.stop()
-                d.callback(None)
 
         timer.f = _wait_for_reboot
-        timer.start(interval=0.5, now=True)
-        yield d
+        return timer.start(interval=0.5, now=True)
+
+    @exception.wrap_exception
+    def pause(self, instance, callback):
+        raise exception.APIError("pause not supported for libvirt.")
+
+    @exception.wrap_exception
+    def unpause(self, instance, callback):
+        raise exception.APIError("unpause not supported for libvirt.")
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def rescue(self, instance):
-        yield self.destroy(instance, False)
+        self.destroy(instance, False)
         xml = self.to_xml(instance, rescue=True)
         rescue_images = {'image_id': FLAGS.rescue_image_id,
                          'kernel_id': FLAGS.rescue_kernel_id,
                          'ramdisk_id': FLAGS.rescue_ramdisk_id}
-        yield self._create_image(instance, xml, 'rescue-', rescue_images)
-        yield self._conn.createXML(xml, 0)
+        self._create_image(instance, xml, 'rescue-', rescue_images)
+        self._conn.createXML(xml, 0)
 
-        d = defer.Deferred()
-        timer = task.LoopingCall(f=None)
+        timer = utils.LoopingCall(f=None)
 
         def _wait_for_rescue():
             try:
                 state = self.get_info(instance['name'])['state']
                 db.instance_set_state(None, instance['id'], state)
                 if state == power_state.RUNNING:
-                    logging.debug('instance %s: rescued', instance['name'])
+                    logging.debug(_('instance %s: rescued'), instance['name'])
                     timer.stop()
-                    d.callback(None)
             except Exception, exn:
-                logging.error('_wait_for_rescue failed: %s', exn)
+                logging.error(_('_wait_for_rescue failed: %s'), exn)
                 db.instance_set_state(None,
                                       instance['id'],
                                       power_state.SHUTDOWN)
                 timer.stop()
-                d.callback(None)
 
         timer.f = _wait_for_rescue
-        timer.start(interval=0.5, now=True)
-        yield d
+        return timer.start(interval=0.5, now=True)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def unrescue(self, instance):
         # NOTE(vish): Because reboot destroys and recreates an instance using
         #             the normal xml file, we can just call reboot here
-        yield self.reboot(instance)
+        self.reboot(instance)
 
-    @defer.inlineCallbacks
     @exception.wrap_exception
     def spawn(self, instance):
         xml = self.to_xml(instance)
@@ -344,14 +321,12 @@ class LibvirtConnection(object):
                               instance['id'],
                               power_state.NOSTATE,
                               'launching')
-        yield NWFilterFirewall(self._conn).\
-            setup_nwfilters_for_instance(instance)
-        yield self._create_image(instance, xml)
-        yield self._conn.createXML(xml, 0)
-        logging.debug("instance %s: is running", instance['name'])
+        NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
+        self._create_image(instance, xml)
+        self._conn.createXML(xml, 0)
+        logging.debug(_("instance %s: is running"), instance['name'])
 
-        local_d = defer.Deferred()
-        timer = task.LoopingCall(f=None)
+        timer = utils.LoopingCall(f=None)
 
         def _wait_for_boot():
             try:
@@ -359,36 +334,33 @@ class LibvirtConnection(object):
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'], state)
                 if state == power_state.RUNNING:
-                    logging.debug('instance %s: booted', instance['name'])
+                    logging.debug(_('instance %s: booted'), instance['name'])
                     timer.stop()
-                    local_d.callback(None)
             except:
-                logging.exception('instance %s: failed to boot',
+                logging.exception(_('instance %s: failed to boot'),
                                   instance['name'])
                 db.instance_set_state(context.get_admin_context(),
                                       instance['id'],
                                       power_state.SHUTDOWN)
                 timer.stop()
-                local_d.callback(None)
+
         timer.f = _wait_for_boot
-        timer.start(interval=0.5, now=True)
-        yield local_d
+        return timer.start(interval=0.5, now=True)
 
     def _flush_xen_console(self, virsh_output):
         logging.info('virsh said: %r' % (virsh_output,))
         virsh_output = virsh_output[0].strip()
 
         if virsh_output.startswith('/dev/'):
-            logging.info('cool, it\'s a device')
-            d = process.simple_execute("sudo dd if=%s iflag=nonblock" %
-                                       virsh_output, check_exit_code=False)
-            d.addCallback(lambda r: r[0])
-            return d
+            logging.info(_('cool, it\'s a device'))
+            out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
+                                     virsh_output, check_exit_code=False)
+            return out
         else:
             return ''
 
     def _append_to_file(self, data, fpath):
-        logging.info('data: %r, fpath: %r' % (data, fpath))
+        logging.info(_('data: %r, fpath: %r') % (data, fpath))
         fp = open(fpath, 'a+')
         fp.write(data)
         return fpath
@@ -403,21 +375,20 @@ class LibvirtConnection(object):
     def get_console_output(self, instance):
         console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                    'console.log')
-        d = process.simple_execute('sudo chown %d %s' % (os.getuid(),
-                                   console_log))
+
+        utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+
         if FLAGS.libvirt_type == 'xen':
-            # Xen is spethial
-            d.addCallback(lambda _:
-                          process.simple_execute("virsh ttyconsole %s" %
-                                                 instance['name']))
-            d.addCallback(self._flush_xen_console)
-            d.addCallback(self._append_to_file, console_log)
+            # Xen is special
+            virsh_output = utils.execute("virsh ttyconsole %s" %
+                                         instance['name'])
+            data = self._flush_xen_console(virsh_output)
+            fpath = self._append_to_file(data, console_log)
         else:
-            d.addCallback(lambda _: defer.succeed(console_log))
-        d.addCallback(self._dump_file)
-        return d
+            fpath = console_log
+
+        return self._dump_file(fpath)
 
-    @defer.inlineCallbacks
     def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
         # syntactic nicety
         basepath = lambda fname = '', prefix = prefix: os.path.join(
@@ -426,12 +397,12 @@ class LibvirtConnection(object):
             prefix + fname)
 
         # ensure directories exist and are writable
-        yield process.simple_execute('mkdir -p %s' % basepath(prefix=''))
-        yield process.simple_execute('chmod 0777 %s' % basepath(prefix=''))
+        utils.execute('mkdir -p %s' % basepath(prefix=''))
+        utils.execute('chmod 0777 %s' % basepath(prefix=''))
 
         # TODO(termie): these are blocking calls, it would be great
         #               if they weren't.
-        logging.info('instance %s: Creating image', inst['name'])
+        logging.info(_('instance %s: Creating image'), inst['name'])
         f = open(basepath('libvirt.xml'), 'w')
         f.write(libvirt_xml)
         f.close()
@@ -448,19 +419,29 @@ class LibvirtConnection(object):
                            'kernel_id': inst['kernel_id'],
                            'ramdisk_id': inst['ramdisk_id']}
         if not os.path.exists(basepath('disk')):
-            yield images.fetch(inst.image_id, basepath('disk-raw'), user,
-                               project)
-        if not os.path.exists(basepath('kernel')):
-            yield images.fetch(inst.kernel_id, basepath('kernel'), user,
-                               project)
-        if not os.path.exists(basepath('ramdisk')):
-            yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
-                               project)
-
-        execute = lambda cmd, process_input = None, check_exit_code = True: \
-                  process.simple_execute(cmd=cmd,
-                                         process_input=process_input,
-                                         check_exit_code=check_exit_code)
+            images.fetch(inst.image_id, basepath('disk-raw'), user,
+                         project)
+
+        if inst['kernel_id']:
+            if not os.path.exists(basepath('kernel')):
+                images.fetch(inst['kernel_id'], basepath('kernel'),
+                             user, project)
+        if inst['ramdisk_id']:
+            if not os.path.exists(basepath('ramdisk')):
+                images.fetch(inst['ramdisk_id'], basepath('ramdisk'),
+                             user, project)
+
+        def execute(cmd, process_input=None, check_exit_code=True):
+            return utils.execute(cmd=cmd,
+                                 process_input=process_input,
+                                 check_exit_code=check_exit_code)
+
+        # For now, we assume that if we're not using a kernel, we're using a
+        # partitioned disk image where the target partition is the first
+        # partition
+        target_partition = None
+        if not inst['kernel_id']:
+            target_partition = "1"
 
         key = str(inst['key_data'])
         net = None
@@ -477,16 +458,24 @@ class LibvirtConnection(object):
                    'dns': network_ref['dns']}
         if key or net:
             if key:
-                logging.info('instance %s: injecting key into image %s',
+                logging.info(_('instance %s: injecting key into image %s'),
                              inst['name'], inst.image_id)
             if net:
-                logging.info('instance %s: injecting net into image %s',
-                             inst['name'], inst.image_id)
-            yield disk.inject_data(basepath('disk-raw'), key, net,
-                                   execute=execute)
-
-        if os.path.exists(basepath('disk')):
-            yield process.simple_execute('rm -f %s' % basepath('disk'))
+                logging.info(_('instance %s: injecting net into image %s'),
+                             inst['name'], inst.image_id)
+            try:
+                disk.inject_data(basepath('disk-raw'), key, net,
+                                 partition=target_partition,
+                                 execute=execute)
+            except Exception as e:
+                # This could be a windows image, or a vmdk format disk
+                logging.warn(_('instance %s: ignoring error injecting data'
+                               ' into image %s (%s)'),
+                             inst['name'], inst.image_id, e)
+
+        if inst['kernel_id']:
+            if os.path.exists(basepath('disk')):
+                utils.execute('rm -f %s' % basepath('disk'))
 
         local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type]
                        ['local_gb']
@@ -495,18 +484,23 @@ class LibvirtConnection(object):
         resize = True
         if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
             resize = False
-        yield disk.partition(basepath('disk-raw'), basepath('disk'),
-                             local_bytes, resize, execute=execute)
+
+        if inst['kernel_id']:
+            disk.partition(basepath('disk-raw'), basepath('disk'),
+                           local_bytes, resize, execute=execute)
+        else:
+            os.rename(basepath('disk-raw'), basepath('disk'))
+            disk.extend(basepath('disk'), local_bytes, execute=execute)
 
         if FLAGS.libvirt_type == 'uml':
-            yield process.simple_execute('sudo chown root %s' %
-                                         basepath('disk'))
+            utils.execute('sudo chown root %s' % basepath('disk'))
 
     def to_xml(self, instance, rescue=False):
         # TODO(termie): cache?
-        logging.debug('instance %s: starting toXML method', instance['name'])
-        network = db.project_get_network(context.get_admin_context(),
-                                         instance['project_id'])
+        logging.debug(_('instance %s: starting toXML method'),
+                      instance['name'])
+        network = db.network_get_by_instance(context.get_admin_context(),
+                                             instance['id'])
         # FIXME(vish): stick this in db
         instance_type = instance['instance_type']
         instance_type = instance_types.INSTANCE_TYPES[instance_type]
@@ -514,6 +508,15 @@ class LibvirtConnection(object):
                                                 instance['id'])
         # Assume that the gateway also acts as the dhcp server.
         dhcp_server = network['gateway']
+
+        if FLAGS.allow_project_net_traffic:
+            net, mask = _get_net_and_mask(network['cidr'])
+            extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n"
+                            "<parameter name=\"PROJMASK\" value=\"%s\" />\n"
+                           ) % (net, mask)
+        else:
+            extra_params = "\n"
+
         xml_info = {'type': FLAGS.libvirt_type,
                     'name': instance['name'],
                     'basepath': os.path.join(FLAGS.instances_path,
@@ -523,20 +526,30 @@ class LibvirtConnection(object):
                     'bridge_name': network['bridge'],
                     'mac_address': instance['mac_address'],
                     'ip_address': ip_address,
-                    'dhcp_server': dhcp_server}
-        if rescue:
-            libvirt_xml = self.rescue_xml % xml_info
-        else:
-            libvirt_xml = self.libvirt_xml % xml_info
-        logging.debug('instance %s: finished toXML method', instance['name'])
+                    'dhcp_server': dhcp_server,
+                    'extra_params': extra_params,
+                    'rescue': rescue}
+        if not rescue:
+            if instance['kernel_id']:
+                xml_info['kernel'] = xml_info['basepath'] + "/kernel"
 
-        return libvirt_xml
+            if instance['ramdisk_id']:
+                xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
+
+            xml_info['disk'] = xml_info['basepath'] + "/disk"
+
+        xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
+        logging.debug(_('instance %s: finished toXML method'),
+                      instance['name'])
+
+        return xml
 
     def get_info(self, instance_name):
         try:
             virt_dom = self._conn.lookupByName(instance_name)
         except:
-            raise exception.NotFound("Instance %s not found" % instance_name)
+            raise exception.NotFound(_("Instance %s not found")
+                                     % instance_name)
         (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
         return {'state': state,
                 'max_mem': max_mem,
@@ -722,6 +735,14 @@ class NWFilterFirewall(object):
                     </rule>
                   </filter>'''
 
+    nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
+                           <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
+                           <filterref filter='allow-dhcp-server'/>
+                           <filterref filter='nova-allow-dhcp-server'/>
+                           <filterref filter='nova-base-ipv4'/>
+                           <filterref filter='nova-base-ipv6'/>
+                         </filter>'''
+
     def nova_base_ipv4_filter(self):
         retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
         for protocol in ['tcp', 'udp', 'icmp']:
@@ -746,27 +767,31 @@ class NWFilterFirewall(object):
         retval += '</filter>'
         return retval
 
-    def nova_project_filter(self, project, net, mask):
-        retval = "<filter name='nova-project-%s' chain='ipv4'>" % project
+    def nova_project_filter(self):
+        retval = "<filter name='nova-project' chain='ipv4'>"
         for protocol in ['tcp', 'udp', 'icmp']:
             retval += """<rule action='accept' direction='in' priority='200'>
-                           <%s srcipaddr='%s' srcipmask='%s' />
-                         </rule>""" % (protocol, net, mask)
+                           <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
+                         </rule>""" % protocol
        retval += '</filter>'
         return retval
 
     def _define_filter(self, xml):
         if callable(xml):
             xml = xml()
-        d = threads.deferToThread(self._conn.nwfilterDefineXML, xml)
-        return d
 
-    @staticmethod
-    def _get_net_and_mask(cidr):
-        net = IPy.IP(cidr)
-        return str(net.net()), str(net.netmask())
+        # execute in a native thread and block current greenthread until done
+        tpool.execute(self._conn.nwfilterDefineXML, xml)
+
+    def setup_base_nwfilters(self):
+        self._define_filter(self.nova_base_ipv4_filter)
+        self._define_filter(self.nova_base_ipv6_filter)
+        self._define_filter(self.nova_dhcp_filter)
+        self._define_filter(self.nova_base_filter)
+        self._define_filter(self.nova_vpn_filter)
+        if FLAGS.allow_project_net_traffic:
+            self._define_filter(self.nova_project_filter)
 
-    @defer.inlineCallbacks
     def setup_nwfilters_for_instance(self, instance):
         """
         Creates an NWFilter for the given instance. In the process,
@@ -774,35 +799,25 @@ class NWFilterFirewall(object):
         the base filter are all in place.
         """
 
-        yield self._define_filter(self.nova_base_ipv4_filter)
-        yield self._define_filter(self.nova_base_ipv6_filter)
-        yield self._define_filter(self.nova_dhcp_filter)
-        yield self._define_filter(self.nova_base_filter)
+        nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n"
+                       ) % instance['name']
 
-        nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \
-                       "  <filterref filter='nova-base' />\n" % \
-                       instance['name']
+        if instance['image_id'] == FLAGS.vpn_image_id:
+            nwfilter_xml += "  <filterref filter='nova-vpn' />\n"
+        else:
+            nwfilter_xml += "  <filterref filter='nova-base' />\n"
 
         if FLAGS.allow_project_net_traffic:
-            network_ref = db.project_get_network(context.get_admin_context(),
-                                                 instance['project_id'])
-            net, mask = self._get_net_and_mask(network_ref['cidr'])
-            project_filter = self.nova_project_filter(instance['project_id'],
-                                                      net, mask)
-            yield self._define_filter(project_filter)
-
-            nwfilter_xml += "  <filterref filter='nova-project-%s' />\n" % \
-                            instance['project_id']
+            nwfilter_xml += "  <filterref filter='nova-project' />\n"
 
         for security_group in instance.security_groups:
-            yield self.ensure_security_group_filter(security_group['id'])
+            self.ensure_security_group_filter(security_group['id'])
 
-            nwfilter_xml += "  <filterref filter='nova-secgroup-%d' />\n" % \
-                            security_group['id']
+            nwfilter_xml += ("  <filterref filter='nova-secgroup-%d' />\n"
+                            ) % security_group['id']
+
         nwfilter_xml += "</filter>"
 
-        yield self._define_filter(nwfilter_xml)
-        return
+        self._define_filter(nwfilter_xml)
 
     def ensure_security_group_filter(self, security_group_id):
         return self._define_filter(
@@ -815,7 +830,7 @@ class NWFilterFirewall(object):
         for rule in security_group.rules:
             rule_xml += "<rule action='accept' direction='in' priority='300'>"
             if rule.cidr:
-                net, mask = self._get_net_and_mask(rule.cidr)
+                net, mask = _get_net_and_mask(rule.cidr)
                 rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
                             (rule.protocol, net, mask)
             if rule.protocol in ['tcp', 'udp']:
```
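The `_define_filter` change above is the canonical eventlet trick for wrapping a blocking C library: `tpool.execute` runs the call in a native OS thread so it blocks only the calling green thread, not the whole process. A self-contained sketch; `tpool` is a real eventlet module, while `slow_call` is invented for illustration:

```python
# Sketch of the tpool.execute pattern used by _define_filter above.
import time

from eventlet import tpool


def slow_call(xml):
    time.sleep(2)          # stands in for libvirt's blocking C call
    return len(xml)


# Runs in a native thread; only this green thread waits, while the hub
# keeps servicing every other green thread in the process.
result = tpool.execute(slow_call, "<filter name='nova-base'/>")
print result
```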
""" + def __init__(self): return @classmethod - @defer.inlineCallbacks def find_network_with_bridge(cls, session, bridge): - """ Return the network on which the bridge is attached, if found """ + """ Return the network on which the bridge is attached, if found.""" expr = 'field "bridge" = "%s"' % bridge - networks = yield session.call_xenapi('network.get_all_records_where', - expr) + networks = session.call_xenapi('network.get_all_records_where', expr) if len(networks) == 1: - defer.returnValue(networks.keys()[0]) + return networks.keys()[0] elif len(networks) > 1: raise Exception('Found non-unique network for bridge %s' % bridge) else: diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 99d484ca2..badaaedc1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -20,15 +20,19 @@ their attributes like VDIs, VIFs, as well as their lookup functions. """ import logging +import urllib +from xml.dom import minidom -from twisted.internet import defer - +from nova import flags from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import images + +FLAGS = flags.FLAGS + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -43,13 +47,22 @@ class VMHelper(): """ The class that wraps the helper methods together. """ + def __init__(self): + return + + @classmethod + def late_import(cls): + """ + Load the XenAPI module in for helper class, if required. + This is to avoid to install the XenAPI library when other + hypervisors are used + """ global XenAPI if XenAPI is None: XenAPI = __import__('XenAPI') @classmethod - @defer.inlineCallbacks def create_vm(cls, session, instance, kernel, ramdisk): """Create a VM record. Returns a Deferred that gives the new VM reference.""" @@ -87,12 +100,11 @@ class VMHelper(): 'other_config': {}, } logging.debug('Created VM %s...', instance.name) - vm_ref = yield session.call_xenapi('VM.create', rec) + vm_ref = session.call_xenapi('VM.create', rec) logging.debug('Created VM %s as %s.', instance.name, vm_ref) - defer.returnValue(vm_ref) + return vm_ref @classmethod - @defer.inlineCallbacks def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable): """Create a VBD record. Returns a Deferred that gives the new VBD reference.""" @@ -111,13 +123,12 @@ class VMHelper(): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref) - vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec) + vbd_ref = session.call_xenapi('VBD.create', vbd_rec) logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref, vdi_ref) - defer.returnValue(vbd_ref) + return vbd_ref @classmethod - @defer.inlineCallbacks def create_vif(cls, session, vm_ref, network_ref, mac_address): """Create a VIF record. Returns a Deferred that gives the new VIF reference.""" @@ -133,13 +144,12 @@ class VMHelper(): vif_rec['qos_algorithm_params'] = {} logging.debug('Creating VIF for VM %s, network %s ... 
', vm_ref, network_ref) - vif_ref = yield session.call_xenapi('VIF.create', vif_rec) + vif_ref = session.call_xenapi('VIF.create', vif_rec) logging.debug('Created VIF %s for VM %s, network %s.', vif_ref, vm_ref, network_ref) - defer.returnValue(vif_ref) + return vif_ref @classmethod - @defer.inlineCallbacks def fetch_image(cls, session, image, user, project, use_sr): """use_sr: True to put the image as a VDI in an SR, False to place it on dom0's filesystem. The former is for VM disks, the latter for @@ -156,12 +166,11 @@ class VMHelper(): args['password'] = user.secret if use_sr: args['add_partition'] = 'true' - task = yield session.async_call_plugin('objectstore', fn, args) - uuid = yield session.wait_for_task(task) - defer.returnValue(uuid) + task = session.async_call_plugin('objectstore', fn, args) + uuid = session.wait_for_task(task) + return uuid @classmethod - @utils.deferredToThread def lookup(cls, session, i): """ Look the instance i up, and returns it if available """ return VMHelper.lookup_blocking(session, i) @@ -179,7 +188,6 @@ class VMHelper(): return vms[0] @classmethod - @utils.deferredToThread def lookup_vm_vdis(cls, session, vm): """ Look for the VDIs that are attached to the VM """ return VMHelper.lookup_vm_vdis_blocking(session, vm) @@ -214,3 +222,36 @@ class VMHelper(): 'mem': long(record['memory_dynamic_max']) >> 10, 'num_cpu': record['VCPUs_max'], 'cpu_time': 0} + + @classmethod + def compile_diagnostics(cls, session, record): + """Compile VM diagnostics data""" + try: + host = session.get_xenapi_host() + host_ip = session.get_xenapi().host.get_record(host)["address"] + diags = {} + xml = get_rrd(host_ip, record["uuid"]) + if xml: + rrd = minidom.parseString(xml) + for i, node in enumerate(rrd.firstChild.childNodes): + # We don't want all of the extra garbage + if i >= 3 and i <= 11: + ref = node.childNodes + # Name and Value + diags[ref[0].firstChild.data] = ref[6].firstChild.data + return diags + except XenAPI.Failure as e: + return {"Unable to retrieve diagnostics": e} + + +def get_rrd(host, uuid): + """Return the VM RRD XML as a string""" + try: + xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % ( + FLAGS.xenapi_connection_username, + FLAGS.xenapi_connection_password, + host, + uuid)) + return xml.read() + except IOError: + return None diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index d36cdaea5..4d897af35 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -20,10 +20,9 @@ Management class for VM-related functions (spawn, reboot, etc). 
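`VMHelper.late_import()` defers loading the XenAPI bindings until a XenAPI driver is actually instantiated, so deployments that only run libvirt never need the package installed. The pattern in isolation; `'XenAPI'` mirrors the diff, but any optional dependency works the same way:

```python
# The deferred-import pattern used by VMHelper.late_import(), shown in
# isolation: the module is bound to a global only on first use.
XenAPI = None


def late_import():
    global XenAPI
    if XenAPI is None:
        XenAPI = __import__('XenAPI')
```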
```diff
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d36cdaea5..4d897af35 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -20,10 +20,9 @@ Management class for VM-related functions (spawn, reboot, etc).
 
 import logging
 
-from twisted.internet import defer
-
 from nova import db
 from nova import context
+
 from nova.auth.manager import AuthManager
 from nova.virt.xenapi.network_utils import NetworkHelper
 from nova.virt.xenapi.vm_utils import VMHelper
@@ -35,100 +34,137 @@ class VMOps(object):
     """
     Management class for VM-related tasks
     """
+
     def __init__(self, session):
         global XenAPI
         if XenAPI is None:
             XenAPI = __import__('XenAPI')
         self._session = session
+        # Load XenAPI module in the helper class
+        VMHelper.late_import()
 
     def list_instances(self):
-        """ List VM instances """
-        return [self._session.get_xenapi().VM.get_name_label(vm) \
-                for vm in self._session.get_xenapi().VM.get_all()]
+        """List VM instances"""
+        vms = []
+        for vm in self._session.get_xenapi().VM.get_all():
+            rec = self._session.get_xenapi().VM.get_record(vm)
+            if not rec["is_a_template"] and not rec["is_control_domain"]:
+                vms.append(rec["name_label"])
+        return vms
 
-    @defer.inlineCallbacks
     def spawn(self, instance):
-        """ Create VM instance """
-        vm = yield VMHelper.lookup(self._session, instance.name)
+        """Create VM instance"""
+        vm = VMHelper.lookup(self._session, instance.name)
         if vm is not None:
             raise Exception('Attempted to create non-unique name %s' %
                             instance.name)
 
-        bridge = db.project_get_network(context.get_admin_context(),
-                                        instance.project_id).bridge
+        bridge = db.network_get_by_instance(context.get_admin_context(),
+                                            instance['id'])['bridge']
         network_ref = \
-            yield NetworkHelper.find_network_with_bridge(self._session, bridge)
+            NetworkHelper.find_network_with_bridge(self._session, bridge)
 
         user = AuthManager().get_user(instance.user_id)
         project = AuthManager().get_project(instance.project_id)
-        vdi_uuid = yield VMHelper.fetch_image(self._session,
-            instance.image_id, user, project, True)
-        kernel = yield VMHelper.fetch_image(self._session,
-            instance.kernel_id, user, project, False)
-        ramdisk = yield VMHelper.fetch_image(self._session,
-            instance.ramdisk_id, user, project, False)
-        vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
-        vm_ref = yield VMHelper.create_vm(self._session,
-            instance, kernel, ramdisk)
-        yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+        vdi_uuid = VMHelper.fetch_image(
+                self._session, instance.image_id, user, project, True)
+        kernel = VMHelper.fetch_image(
+                self._session, instance.kernel_id, user, project, False)
+        ramdisk = VMHelper.fetch_image(
+                self._session, instance.ramdisk_id, user, project, False)
+        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+        vm_ref = VMHelper.create_vm(
+                self._session, instance, kernel, ramdisk)
+        VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
         if network_ref:
-            yield VMHelper.create_vif(self._session, vm_ref,
-                                      network_ref, instance.mac_address)
+            VMHelper.create_vif(self._session, vm_ref,
+                                network_ref, instance.mac_address)
         logging.debug('Starting VM %s...', vm_ref)
-        yield self._session.call_xenapi('VM.start', vm_ref, False, False)
+        self._session.call_xenapi('VM.start', vm_ref, False, False)
         logging.info('Spawning VM %s created %s.', instance.name,
                      vm_ref)
 
-    @defer.inlineCallbacks
     def reboot(self, instance):
-        """ Reboot VM instance """
+        """Reboot VM instance"""
         instance_name = instance.name
-        vm = yield VMHelper.lookup(self._session, instance_name)
+        vm = VMHelper.lookup(self._session, instance_name)
         if vm is None:
             raise Exception('instance not present %s' % instance_name)
-        task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm)
-        yield self._session.wait_for_task(task)
+        task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
+        self._session.wait_for_task(instance.id, task)
 
-    @defer.inlineCallbacks
     def destroy(self, instance):
-        """ Destroy VM instance """
-        vm = yield VMHelper.lookup(self._session, instance.name)
+        """Destroy VM instance"""
+        vm = VMHelper.lookup(self._session, instance.name)
         if vm is None:
             # Don't complain, just return.  This lets us clean up instances
             # that have already disappeared from the underlying platform.
-            defer.returnValue(None)
+            return
         # Get the VDIs related to the VM
-        vdis = yield VMHelper.lookup_vm_vdis(self._session, vm)
+        vdis = VMHelper.lookup_vm_vdis(self._session, vm)
         try:
-            task = yield self._session.call_xenapi('Async.VM.hard_shutdown',
-                                                   vm)
-            yield self._session.wait_for_task(task)
+            task = self._session.call_xenapi('Async.VM.hard_shutdown',
+                                             vm)
+            self._session.wait_for_task(instance.id, task)
         except XenAPI.Failure, exc:
             logging.warn(exc)
         # Disk clean-up
         if vdis:
             for vdi in vdis:
                 try:
-                    task = yield self._session.call_xenapi('Async.VDI.destroy',
-                                                           vdi)
-                    yield self._session.wait_for_task(task)
+                    task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+                    self._session.wait_for_task(instance.id, task)
                 except XenAPI.Failure, exc:
                     logging.warn(exc)
         try:
-            task = yield self._session.call_xenapi('Async.VM.destroy', vm)
-            yield self._session.wait_for_task(task)
+            task = self._session.call_xenapi('Async.VM.destroy', vm)
+            self._session.wait_for_task(instance.id, task)
         except XenAPI.Failure, exc:
             logging.warn(exc)
 
+    def _wait_with_callback(self, instance_id, task, callback):
+        ret = None
+        try:
+            ret = self._session.wait_for_task(instance_id, task)
+        except XenAPI.Failure, exc:
+            logging.warn(exc)
+        callback(ret)
+
+    def pause(self, instance, callback):
+        """Pause VM instance"""
+        instance_name = instance.name
+        vm = VMHelper.lookup(self._session, instance_name)
+        if vm is None:
+            raise Exception('instance not present %s' % instance_name)
+        task = self._session.call_xenapi('Async.VM.pause', vm)
+        self._wait_with_callback(instance.id, task, callback)
+
+    def unpause(self, instance, callback):
+        """Unpause VM instance"""
+        instance_name = instance.name
+        vm = VMHelper.lookup(self._session, instance_name)
+        if vm is None:
+            raise Exception('instance not present %s' % instance_name)
+        task = self._session.call_xenapi('Async.VM.unpause', vm)
+        self._wait_with_callback(instance.id, task, callback)
+
     def get_info(self, instance_id):
-        """ Return data about VM instance """
+        """Return data about VM instance"""
         vm = VMHelper.lookup_blocking(self._session, instance_id)
         if vm is None:
             raise Exception('instance not present %s' % instance_id)
         rec = self._session.get_xenapi().VM.get_record(vm)
         return VMHelper.compile_info(rec)
 
+    def get_diagnostics(self, instance_id):
+        """Return data about VM diagnostics"""
+        vm = VMHelper.lookup(self._session, instance_id)
+        if vm is None:
+            raise Exception("instance not present %s" % instance_id)
+        rec = self._session.get_xenapi().VM.get_record(vm)
+        return VMHelper.compile_diagnostics(self._session, rec)
+
     def get_console_output(self, instance):
-        """ Return snapshot of console """
+        """Return snapshot of console"""
         # TODO: implement this to fix pylint!
         return 'FAKE CONSOLE OUTPUT of instance'
```
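The new `pause()`/`unpause()` methods hand the XenAPI task to `_wait_with_callback`, which blocks on the poll and then invokes the caller's callback with the result, or `None` on failure. The compute service presumably drives this from its own green thread; a hypothetical caller-side sketch, where only `eventlet.spawn` is a real API:

```python
# Hypothetical caller-side sketch of the pause(instance, callback)
# convention introduced above; conn/_pause_done are invented names.
import eventlet


def _pause_done(result):
    # result is the task result, or None if the XenAPI task failed
    print 'pause finished:', result


def pause_instance(conn, instance):
    # run the blocking poll loop in its own green thread so the worker
    # that received the request stays free to handle other calls
    eventlet.spawn(conn.pause, instance, _pause_done)
```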
```diff
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index a4c7a3861..1943ccab0 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -20,6 +20,7 @@ Management class for Storage-related functions (attach, detach, etc).
 
 class VolumeOps(object):
+
     def __init__(self, session):
         self._session = session
```

```diff
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 26b30bf92..146e2f153 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -48,11 +48,14 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
 """
 
 import logging
+import sys
 import xmlrpclib
 
-from twisted.internet import defer
-from twisted.internet import reactor
+from eventlet import event
+from eventlet import tpool
 
+from nova import context
+from nova import db
 from nova import utils
 from nova import flags
 from nova.virt.xenapi.vmops import VMOps
@@ -92,124 +95,147 @@ def get_connection(_):
     username = FLAGS.xenapi_connection_username
     password = FLAGS.xenapi_connection_password
     if not url or password is None:
-        raise Exception('Must specify xenapi_connection_url, '
-                        'xenapi_connection_username (optionally), and '
-                        'xenapi_connection_password to use '
-                        'connection_type=xenapi')
+        raise Exception(_('Must specify xenapi_connection_url, '
+                          'xenapi_connection_username (optionally), and '
+                          'xenapi_connection_password to use '
+                          'connection_type=xenapi'))
     return XenAPIConnection(url, username, password)
 
 
 class XenAPIConnection(object):
-    """ A connection to XenServer or Xen Cloud Platform """
+    """A connection to XenServer or Xen Cloud Platform"""
+
     def __init__(self, url, user, pw):
         session = XenAPISession(url, user, pw)
         self._vmops = VMOps(session)
         self._volumeops = VolumeOps(session)
 
     def list_instances(self):
-        """ List VM instances """
+        """List VM instances"""
         return self._vmops.list_instances()
 
     def spawn(self, instance):
-        """ Create VM instance """
+        """Create VM instance"""
         self._vmops.spawn(instance)
 
     def reboot(self, instance):
-        """ Reboot VM instance """
+        """Reboot VM instance"""
         self._vmops.reboot(instance)
 
     def destroy(self, instance):
-        """ Destroy VM instance """
+        """Destroy VM instance"""
         self._vmops.destroy(instance)
 
+    def pause(self, instance, callback):
+        """Pause VM instance"""
+        self._vmops.pause(instance, callback)
+
+    def unpause(self, instance, callback):
+        """Unpause paused VM instance"""
+        self._vmops.unpause(instance, callback)
+
     def get_info(self, instance_id):
-        """ Return data about VM instance """
+        """Return data about VM instance"""
         return self._vmops.get_info(instance_id)
 
+    def get_diagnostics(self, instance_id):
+        """Return data about VM diagnostics"""
+        return self._vmops.get_diagnostics(instance_id)
+
     def get_console_output(self, instance):
-        """ Return snapshot of console """
+        """Return snapshot of console"""
         return self._vmops.get_console_output(instance)
 
     def attach_volume(self, instance_name, device_path, mountpoint):
-        """ Attach volume storage to VM instance """
+        """Attach volume storage to VM instance"""
         return self._volumeops.attach_volume(instance_name,
                                              device_path,
                                              mountpoint)
 
     def detach_volume(self, instance_name, mountpoint):
-        """ Detach volume storage to VM instance """
+        """Detach volume storage to VM instance"""
         return self._volumeops.detach_volume(instance_name, mountpoint)
 
 
 class XenAPISession(object):
-    """ The session to invoke XenAPI SDK calls """
+    """The session to invoke XenAPI SDK calls"""
+
     def __init__(self, url, user, pw):
         self._session = XenAPI.Session(url)
         self._session.login_with_password(user, pw)
 
     def get_xenapi(self):
-        """ Return the xenapi object """
+        """Return the xenapi object"""
         return self._session.xenapi
 
     def get_xenapi_host(self):
-        """ Return the xenapi host """
+        """Return the xenapi host"""
         return self._session.xenapi.session.get_this_host(self._session.handle)
 
-    @utils.deferredToThread
     def call_xenapi(self, method, *args):
-        """Call the specified XenAPI method on a background thread.  Returns
-        a Deferred for the result."""
+        """Call the specified XenAPI method on a background thread."""
         f = self._session.xenapi
         for m in method.split('.'):
             f = f.__getattr__(m)
-        return f(*args)
+        return tpool.execute(f, *args)
 
-    @utils.deferredToThread
     def async_call_plugin(self, plugin, fn, args):
-        """Call Async.host.call_plugin on a background thread.  Returns a
-        Deferred with the task reference."""
-        return _unwrap_plugin_exceptions(
-            self._session.xenapi.Async.host.call_plugin,
-            self.get_xenapi_host(), plugin, fn, args)
+        """Call Async.host.call_plugin on a background thread."""
+        return tpool.execute(_unwrap_plugin_exceptions,
+                             self._session.xenapi.Async.host.call_plugin,
+                             self.get_xenapi_host(), plugin, fn, args)
 
-    def wait_for_task(self, task):
+    def wait_for_task(self, instance_id, task):
         """Return a Deferred that will give the result of the given task.
         The task is polled until it completes."""
-        d = defer.Deferred()
-        reactor.callLater(0, self._poll_task, task, d)
-        return d
 
-    @utils.deferredToThread
-    def _poll_task(self, task, deferred):
+        done = event.Event()
+        loop = utils.LoopingCall(self._poll_task, instance_id, task, done)
+        loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+        rv = done.wait()
+        loop.stop()
+        return rv
+
+    def _poll_task(self, instance_id, task, done):
        """Poll the given XenAPI task, and fire the given Deferred if we
         get a result."""
         try:
-            #logging.debug('Polling task %s...', task)
+            name = self._session.xenapi.task.get_name_label(task)
             status = self._session.xenapi.task.get_status(task)
-            if status == 'pending':
-                reactor.callLater(FLAGS.xenapi_task_poll_interval,
-                                  self._poll_task, task, deferred)
-            elif status == 'success':
+            action = dict(
+                instance_id=int(instance_id),
+                action=name,
+                error=None)
+            if status == "pending":
+                return
+            elif status == "success":
                 result = self._session.xenapi.task.get_result(task)
-                logging.info('Task %s status: success.  %s', task, result)
-                deferred.callback(_parse_xmlrpc_value(result))
+                logging.info(_("Task [%s] %s status: success    %s") % (
+                    name,
+                    task,
+                    result))
+                done.send(_parse_xmlrpc_value(result))
             else:
                 error_info = self._session.xenapi.task.get_error_info(task)
-                logging.warn('Task %s status: %s.  %s', task, status,
-                             error_info)
-                deferred.errback(XenAPI.Failure(error_info))
-            #logging.debug('Polling task %s done.', task)
+                action["error"] = str(error_info)
+                logging.warn(_("Task [%s] %s status: %s    %s") % (
+                    name,
+                    task,
+                    status,
+                    error_info))
+                done.send_exception(XenAPI.Failure(error_info))
+            db.instance_action_create(context.get_admin_context(), action)
         except XenAPI.Failure, exc:
             logging.warn(exc)
-            deferred.errback(exc)
+            done.send_exception(*sys.exc_info())
 
 
 def _unwrap_plugin_exceptions(func, *args, **kwargs):
-    """ Parse exception details """
+    """Parse exception details"""
     try:
         return func(*args, **kwargs)
     except XenAPI.Failure, exc:
-        logging.debug("Got exception: %s", exc)
+        logging.debug(_("Got exception: %s"), exc)
         if (len(exc.details) == 4 and
             exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
             exc.details[2] == 'Failure'):
@@ -222,7 +248,7 @@ def _unwrap_plugin_exceptions(func, *args, **kwargs):
         else:
             raise
     except xmlrpclib.ProtocolError, exc:
-        logging.debug("Got exception: %s", exc)
+        logging.debug(_("Got exception: %s"), exc)
         raise
```
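The new `wait_for_task` is the heart of the migration: a `utils.LoopingCall` polls the XenAPI task while an `eventlet.event.Event` parks the caller, and `send_exception` re-raises inside the waiter. A stripped-down version of that shape using only stock eventlet; the `wait_for_task` here is a minimal stand-in, not nova's implementation:

```python
# Stripped-down shape of wait_for_task: poll until done, park the caller
# on an Event. The eventlet APIs are real; this helper is a sketch, and
# it replaces nova.utils.LoopingCall with a plain polling green thread.
import eventlet
from eventlet import event


def wait_for_task(poll_once, interval=0.5):
    """poll_once(done) should call done.send(result) or
    done.send_exception(exc) once the task leaves 'pending'."""
    done = event.Event()

    def _loop():
        while not done.ready():
            poll_once(done)
            eventlet.sleep(interval)

    poller = eventlet.spawn(_loop)
    try:
        return done.wait()   # re-raises if send_exception() was used
    finally:
        poller.kill()
```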
