author      Trey Morris <trey.morris@rackspace.com>    2010-12-23 21:53:33 +0000
committer   Trey Morris <trey.morris@rackspace.com>    2010-12-23 21:53:33 +0000
commit      5f8d02b39fb8917b34b68bbbf450656e1b68211c (patch)
tree        4a8489e10fecea511e3fffece42de81c9fdc7837 /nova/virt
parent      1c26d2b2ce824dbc64525eea699efbfa8bf04617 (diff)
parent      75e2cbec9eb5132a49446f1b6d563d5f43d007de (diff)
download    nova-5f8d02b39fb8917b34b68bbbf450656e1b68211c.tar.gz
            nova-5f8d02b39fb8917b34b68bbbf450656e1b68211c.tar.xz
            nova-5f8d02b39fb8917b34b68bbbf450656e1b68211c.zip
fixed merge conflict
Diffstat (limited to 'nova/virt')
 -rw-r--r--  nova/virt/connection.py                       2
 -rw-r--r--  nova/virt/fake.py                            32
 -rw-r--r--  nova/virt/images.py                          11
 -rw-r--r--  nova/virt/libvirt.qemu.xml.template          33
 -rw-r--r--  nova/virt/libvirt.rescue.qemu.xml.template   37
 -rw-r--r--  nova/virt/libvirt.rescue.uml.xml.template    26
 -rw-r--r--  nova/virt/libvirt.rescue.xen.xml.template    34
 -rw-r--r--  nova/virt/libvirt.uml.xml.template           26
 -rw-r--r--  nova/virt/libvirt.xen.xml.template           30
 -rw-r--r--  nova/virt/libvirt.xml.template               79
 -rw-r--r--  nova/virt/libvirt_conn.py                   400
 -rw-r--r--  nova/virt/xenapi/__init__.py                 15
 -rw-r--r--  nova/virt/xenapi/fake.py                    388
 -rw-r--r--  nova/virt/xenapi/network_utils.py            20
 -rw-r--r--  nova/virt/xenapi/vm_utils.py                135
 -rw-r--r--  nova/virt/xenapi/vmops.py                   174
 -rw-r--r--  nova/virt/xenapi/volume_utils.py            268
 -rw-r--r--  nova/virt/xenapi/volumeops.py               102
 -rw-r--r--  nova/virt/xenapi_conn.py                    205
19 files changed, 1401 insertions, 616 deletions
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index c40bb4bb4..61e99944e 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -66,6 +66,6 @@ def get_connection(read_only=False):
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
- logging.error('Failed to open connection to the hypervisor')
+ logging.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
return conn
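
The only change here is wrapping the log message in _(), Nova's gettext marker for translatable strings. A minimal sketch of how such a marker is typically made available, assuming a gettext domain of 'nova' and install-at-startup (neither is shown in this diff):

    import gettext
    import logging

    # Hypothetical setup: gettext.install() puts _() into builtins, so
    # modules can mark strings without importing anything themselves.
    gettext.install('nova')
    logging.error(_('Failed to open connection to the hypervisor'))
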
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f855523d3..238acf798 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -25,8 +25,6 @@ semantics of real hypervisor connections.
"""
-from twisted.internet import defer
-
from nova import exception
from nova.compute import power_state
@@ -78,6 +76,12 @@ class FakeConnection(object):
cls._instance = cls()
return cls._instance
+ def init_host(self):
+ """
+ Initialize anything that is necessary for the driver to function
+ """
+ return
+
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
@@ -107,7 +111,6 @@ class FakeConnection(object):
fake_instance = FakeInstance()
self.instances[instance.name] = fake_instance
fake_instance._state = power_state.RUNNING
- return defer.succeed(None)
def reboot(self, instance):
"""
@@ -119,19 +122,31 @@ class FakeConnection(object):
The work will be done asynchronously. This function returns a
Deferred that allows the caller to detect when it is complete.
"""
- return defer.succeed(None)
+ pass
def rescue(self, instance):
"""
Rescue the specified instance.
"""
- return defer.succeed(None)
+ pass
def unrescue(self, instance):
"""
Unrescue the specified instance.
"""
- return defer.succeed(None)
+ pass
+
+ def pause(self, instance, callback):
+ """
+ Pause the specified instance.
+ """
+ pass
+
+ def unpause(self, instance, callback):
+ """
+ Unpause the specified instance.
+ """
+ pass
def destroy(self, instance):
"""
@@ -144,7 +159,6 @@ class FakeConnection(object):
Deferred that allows the caller to detect when it is complete.
"""
del self.instances[instance.name]
- return defer.succeed(None)
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach the disk at device_path to the instance at mountpoint"""
@@ -167,7 +181,8 @@ class FakeConnection(object):
knowledge of the instance
"""
if instance_name not in self.instances:
- raise exception.NotFound("Instance %s Not Found" % instance_name)
+ raise exception.NotFound(_("Instance %s Not Found")
+ % instance_name)
i = self.instances[instance_name]
return {'state': i._state,
'max_mem': 0,
@@ -247,5 +262,6 @@ class FakeConnection(object):
class FakeInstance(object):
+
def __init__(self):
self._state = power_state.NOSTATE
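
The fake driver changes above show the commit's core migration in miniature: methods no longer return Twisted Deferreds, because under eventlet the driver API is plainly synchronous. A minimal sketch of the before/after contract (class shape assumed from the hunks above):

    class FakeConnection(object):
        def reboot(self, instance):
            # Before: even a no-op had to return a fired Deferred,
            #     return defer.succeed(None)
            # After: the call is synchronous; concurrency comes from
            # eventlet greenthreads rather than callback chains.
            pass
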
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 981aa5cf3..1c9b2e093 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -26,7 +26,7 @@ import time
import urlparse
from nova import flags
-from nova import process
+from nova import utils
from nova.auth import manager
from nova.auth import signer
from nova.objectstore import image
@@ -50,7 +50,7 @@ def _fetch_s3_image(image, path, user, project):
# This should probably move somewhere else, like e.g. a download_as
# method on User objects and at the same time get rewritten to use
- # twisted web client.
+ # a web client.
headers = {}
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
@@ -63,15 +63,16 @@ def _fetch_s3_image(image, path, user, project):
cmd = ['/usr/bin/curl', '--fail', '--silent', url]
for (k, v) in headers.iteritems():
- cmd += ['-H', '%s: %s' % (k, v)]
+ cmd += ['-H', '"%s: %s"' % (k, v)]
cmd += ['-o', path]
- return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])
+ cmd_out = ' '.join(cmd)
+ return utils.execute(cmd_out)
def _fetch_local_image(image, path, user, project):
source = _image_path('%s/image' % image)
- return process.simple_execute('cp %s %s' % (source, path))
+ return utils.execute('cp %s %s' % (source, path))
def _image_path(path):
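
Both fetch paths now go through nova.utils.execute, which, as used in this commit, takes a single command string, returns an (stdout, stderr) tuple, and checks the exit status unless check_exit_code=False is passed. That calling convention is why the curl argv is joined into one string and the header values gain shell quotes. A hedged sketch:

    from nova import utils

    # Sketch only; the signature is inferred from the call sites in this
    # diff (cmd string, process_input=..., check_exit_code=...).
    out, err = utils.execute('cp /tmp/source-image /tmp/dest-image')
    out, err = utils.execute('dd if=/dev/zero of=/dev/null count=1',
                             check_exit_code=False)
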
diff --git a/nova/virt/libvirt.qemu.xml.template b/nova/virt/libvirt.qemu.xml.template
deleted file mode 100644
index 2538b1ade..000000000
--- a/nova/virt/libvirt.qemu.xml.template
+++ /dev/null
@@ -1,33 +0,0 @@
-<domain type='%(type)s'>
- <name>%(name)s</name>
- <os>
- <type>hvm</type>
- <kernel>%(basepath)s/kernel</kernel>
- <initrd>%(basepath)s/ramdisk</initrd>
- <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
- </os>
- <features>
- <acpi/>
- </features>
- <memory>%(memory_kb)s</memory>
- <vcpu>%(vcpus)s</vcpu>
- <devices>
- <disk type='file'>
- <source file='%(basepath)s/disk'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <interface type='bridge'>
- <source bridge='%(bridge_name)s'/>
- <mac address='%(mac_address)s'/>
- <!-- <model type='virtio'/> CANT RUN virtio network right now -->
- <filterref filter="nova-instance-%(name)s">
- <parameter name="IP" value="%(ip_address)s" />
- <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
- </filterref>
- </interface>
- <serial type="file">
- <source path='%(basepath)s/console.log'/>
- <target port='1'/>
- </serial>
- </devices>
-</domain>
diff --git a/nova/virt/libvirt.rescue.qemu.xml.template b/nova/virt/libvirt.rescue.qemu.xml.template
deleted file mode 100644
index c0ffbdcee..000000000
--- a/nova/virt/libvirt.rescue.qemu.xml.template
+++ /dev/null
@@ -1,37 +0,0 @@
-<domain type='%(type)s'>
- <name>%(name)s</name>
- <os>
- <type>hvm</type>
- <kernel>%(basepath)s/rescue-kernel</kernel>
- <initrd>%(basepath)s/rescue-ramdisk</initrd>
- <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
- </os>
- <features>
- <acpi/>
- </features>
- <memory>%(memory_kb)s</memory>
- <vcpu>%(vcpus)s</vcpu>
- <devices>
- <disk type='file'>
- <source file='%(basepath)s/rescue-disk'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <disk type='file'>
- <source file='%(basepath)s/disk'/>
- <target dev='vdb' bus='virtio'/>
- </disk>
- <interface type='bridge'>
- <source bridge='%(bridge_name)s'/>
- <mac address='%(mac_address)s'/>
- <!-- <model type='virtio'/> CANT RUN virtio network right now -->
- <filterref filter="nova-instance-%(name)s">
- <parameter name="IP" value="%(ip_address)s" />
- <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
- </filterref>
- </interface>
- <serial type="file">
- <source path='%(basepath)s/console.log'/>
- <target port='1'/>
- </serial>
- </devices>
-</domain>
diff --git a/nova/virt/libvirt.rescue.uml.xml.template b/nova/virt/libvirt.rescue.uml.xml.template
deleted file mode 100644
index 836f47532..000000000
--- a/nova/virt/libvirt.rescue.uml.xml.template
+++ /dev/null
@@ -1,26 +0,0 @@
-<domain type='%(type)s'>
- <name>%(name)s</name>
- <memory>%(memory_kb)s</memory>
- <os>
- <type>%(type)s</type>
- <kernel>/usr/bin/linux</kernel>
- <root>/dev/ubda1</root>
- </os>
- <devices>
- <disk type='file'>
- <source file='%(basepath)s/rescue-disk'/>
- <target dev='ubd0' bus='uml'/>
- </disk>
- <disk type='file'>
- <source file='%(basepath)s/disk'/>
- <target dev='ubd1' bus='uml'/>
- </disk>
- <interface type='bridge'>
- <source bridge='%(bridge_name)s'/>
- <mac address='%(mac_address)s'/>
- </interface>
- <console type="file">
- <source path='%(basepath)s/console.log'/>
- </console>
- </devices>
-</domain>
diff --git a/nova/virt/libvirt.rescue.xen.xml.template b/nova/virt/libvirt.rescue.xen.xml.template
deleted file mode 100644
index 3b8d27237..000000000
--- a/nova/virt/libvirt.rescue.xen.xml.template
+++ /dev/null
@@ -1,34 +0,0 @@
-<domain type='%(type)s'>
- <name>%(name)s</name>
- <os>
- <type>linux</type>
- <kernel>%(basepath)s/kernel</kernel>
- <initrd>%(basepath)s/ramdisk</initrd>
- <root>/dev/xvda1</root>
- <cmdline>ro</cmdline>
- </os>
- <features>
- <acpi/>
- </features>
- <memory>%(memory_kb)s</memory>
- <vcpu>%(vcpus)s</vcpu>
- <devices>
- <disk type='file'>
- <source file='%(basepath)s/rescue-disk'/>
- <target dev='sda' />
- </disk>
- <disk type='file'>
- <source file='%(basepath)s/disk'/>
- <target dev='sdb' />
- </disk>
- <interface type='bridge'>
- <source bridge='%(bridge_name)s'/>
- <mac address='%(mac_address)s'/>
- </interface>
- <console type="file">
- <source path='%(basepath)s/console.log'/>
- <target port='1'/>
- </console>
- </devices>
-</domain>
-
diff --git a/nova/virt/libvirt.uml.xml.template b/nova/virt/libvirt.uml.xml.template
deleted file mode 100644
index bb8b47911..000000000
--- a/nova/virt/libvirt.uml.xml.template
+++ /dev/null
@@ -1,26 +0,0 @@
-<domain type='%(type)s'>
- <name>%(name)s</name>
- <memory>%(memory_kb)s</memory>
- <os>
- <type>%(type)s</type>
- <kernel>/usr/bin/linux</kernel>
- <root>/dev/ubda1</root>
- </os>
- <devices>
- <disk type='file'>
- <source file='%(basepath)s/disk'/>
- <target dev='ubd0' bus='uml'/>
- </disk>
- <interface type='bridge'>
- <source bridge='%(bridge_name)s'/>
- <mac address='%(mac_address)s'/>
- <filterref filter="nova-instance-%(name)s">
- <parameter name="IP" value="%(ip_address)s" />
- <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
- </filterref>
- </interface>
- <console type="file">
- <source path='%(basepath)s/console.log'/>
- </console>
- </devices>
-</domain>
diff --git a/nova/virt/libvirt.xen.xml.template b/nova/virt/libvirt.xen.xml.template
deleted file mode 100644
index 9677902c6..000000000
--- a/nova/virt/libvirt.xen.xml.template
+++ /dev/null
@@ -1,30 +0,0 @@
-<domain type='%(type)s'>
- <name>%(name)s</name>
- <os>
- <type>linux</type>
- <kernel>%(basepath)s/kernel</kernel>
- <initrd>%(basepath)s/ramdisk</initrd>
- <root>/dev/xvda1</root>
- <cmdline>ro</cmdline>
- </os>
- <features>
- <acpi/>
- </features>
- <memory>%(memory_kb)s</memory>
- <vcpu>%(vcpus)s</vcpu>
- <devices>
- <disk type='file'>
- <source file='%(basepath)s/disk'/>
- <target dev='sda' />
- </disk>
- <interface type='bridge'>
- <source bridge='%(bridge_name)s'/>
- <mac address='%(mac_address)s'/>
- </interface>
- <console type="file">
- <source path='%(basepath)s/console.log'/>
- <target port='1'/>
- </console>
- </devices>
-</domain>
-
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
new file mode 100644
index 000000000..3fb2243da
--- /dev/null
+++ b/nova/virt/libvirt.xml.template
@@ -0,0 +1,79 @@
+<domain type='${type}'>
+ <name>${name}</name>
+ <memory>${memory_kb}</memory>
+ <os>
+#if $type == 'uml'
+ #set $disk_prefix = 'ubd'
+ #set $disk_bus = 'uml'
+ <type>uml</type>
+ <kernel>/usr/bin/linux</kernel>
+ <root>/dev/ubda1</root>
+#else
+ #if $type == 'xen'
+ #set $disk_prefix = 'sd'
+ #set $disk_bus = 'scsi'
+ <type>linux</type>
+ <root>/dev/xvda1</root>
+ #else
+ #set $disk_prefix = 'vd'
+ #set $disk_bus = 'virtio'
+ <type>hvm</type>
+ #end if
+ #if $getVar('rescue', False)
+ <kernel>${basepath}/rescue-kernel</kernel>
+ <initrd>${basepath}/rescue-ramdisk</initrd>
+ #else
+ #if $getVar('kernel', None)
+ <kernel>${kernel}</kernel>
+ #if $type == 'xen'
+ <cmdline>ro</cmdline>
+ #else
+ <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
+ #end if
+ #if $getVar('ramdisk', None)
+ <initrd>${ramdisk}</initrd>
+ #end if
+ #else
+ <boot dev="hd" />
+ #end if
+ #end if
+#end if
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <vcpu>${vcpus}</vcpu>
+ <devices>
+#if $getVar('rescue', False)
+ <disk type='file'>
+ <source file='${basepath}/rescue-disk'/>
+ <target dev='${disk_prefix}a' bus='${disk_bus}'/>
+ </disk>
+ <disk type='file'>
+ <source file='${basepath}/disk'/>
+ <target dev='${disk_prefix}b' bus='${disk_bus}'/>
+ </disk>
+#else
+ <disk type='file'>
+ <source file='${basepath}/disk'/>
+ <target dev='${disk_prefix}a' bus='${disk_bus}'/>
+ </disk>
+#end if
+ <interface type='bridge'>
+ <source bridge='${bridge_name}'/>
+ <mac address='${mac_address}'/>
+ <!-- <model type='virtio'/> CANT RUN virtio network right now -->
+ <filterref filter="nova-instance-${name}">
+ <parameter name="IP" value="${ip_address}" />
+ <parameter name="DHCPSERVER" value="${dhcp_server}" />
+#if $getVar('extra_params', False)
+ ${extra_params}
+#end if
+ </filterref>
+ </interface>
+ <serial type="file">
+ <source path='${basepath}/console.log'/>
+ <target port='1'/>
+ </serial>
+ </devices>
+</domain>
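
This single Cheetah template replaces the six static %(...)s templates deleted above: the #if $type branches pick the OS stanza and the disk prefix/bus per hypervisor, and $getVar('rescue', False) folds the rescue variants in as well. A hedged sketch of rendering it, mirroring the xml_info dict that to_xml() builds below (all values illustrative):

    from Cheetah.Template import Template

    xml_info = {'type': 'kvm', 'name': 'instance-00000001',
                'basepath': '/var/lib/nova/instances/instance-00000001',
                'memory_kb': 2097152, 'vcpus': 1,
                'bridge_name': 'br100', 'mac_address': '02:16:3e:7d:00:01',
                'ip_address': '10.0.0.3', 'dhcp_server': '10.0.0.1',
                'extra_params': '\n', 'rescue': False,
                'kernel': '/var/lib/nova/instances/instance-00000001/kernel'}
    xml = str(Template(open('libvirt.xml.template').read(),
                       searchList=[xml_info]))
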
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 18085089f..651b2af93 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -27,12 +27,7 @@ Supports KVM, QEMU, UML, and XEN.
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
-:libvirt_xml_template: Libvirt XML Template (QEmu/KVM).
-:libvirt_xen_xml_template: Libvirt XML Template (Xen).
-:libvirt_uml_xml_template: Libvirt XML Template (User Mode Linux).
-:libvirt_rescue_xml_template: XML template for rescue mode (KVM & QEMU).
-:libvirt_rescue_xen_xml_template: XML templage for rescue mode (XEN).
-:libvirt_rescue_uml_xml_template: XML template for rescue mode (UML).
+:libvirt_xml_template: Libvirt XML Template.
:rescue_image_id: Rescue ami image (default: ami-rescue).
:rescue_kernel_id: Rescue aki image (default: aki-rescue).
:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
@@ -45,16 +40,16 @@ import logging
import os
import shutil
+from eventlet import greenthread
+from eventlet import event
+from eventlet import tpool
+
import IPy
-from twisted.internet import defer
-from twisted.internet import task
-from twisted.internet import threads
from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova import process
from nova import utils
#from nova.api import context
from nova.auth import manager
@@ -63,36 +58,20 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
+from Cheetah.Template import Template
+
libvirt = None
libxml2 = None
FLAGS = flags.FLAGS
-flags.DEFINE_string('libvirt_rescue_xml_template',
- utils.abspath('virt/libvirt.rescue.qemu.xml.template'),
- 'Libvirt RESCUE XML Template for QEmu/KVM')
-flags.DEFINE_string('libvirt_rescue_xen_xml_template',
- utils.abspath('virt/libvirt.rescue.xen.xml.template'),
- 'Libvirt RESCUE XML Template for xen')
-flags.DEFINE_string('libvirt_rescue_uml_xml_template',
- utils.abspath('virt/libvirt.rescue.uml.xml.template'),
- 'Libvirt RESCUE XML Template for user-mode-linux')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
flags.DEFINE_string('libvirt_xml_template',
- utils.abspath('virt/libvirt.qemu.xml.template'),
- 'Libvirt XML Template for QEmu/KVM')
-flags.DEFINE_string('libvirt_xen_xml_template',
- utils.abspath('virt/libvirt.xen.xml.template'),
- 'Libvirt XML Template for Xen')
-flags.DEFINE_string('libvirt_uml_xml_template',
- utils.abspath('virt/libvirt.uml.xml.template'),
- 'Libvirt XML Template for user-mode-linux')
-flags.DEFINE_string('injected_network_template',
- utils.abspath('virt/interfaces.template'),
- 'Template file for injected network')
+ utils.abspath('virt/libvirt.xml.template'),
+ 'Libvirt XML Template')
flags.DEFINE_string('libvirt_type',
'kvm',
'Libvirt domain type (valid options are: '
@@ -118,21 +97,27 @@ def get_connection(read_only):
return LibvirtConnection(read_only)
+def _get_net_and_mask(cidr):
+ net = IPy.IP(cidr)
+ return str(net.net()), str(net.netmask())
+
+
class LibvirtConnection(object):
+
def __init__(self, read_only):
- (self.libvirt_uri,
- template_file,
- rescue_file) = self.get_uri_and_templates()
+ self.libvirt_uri = self.get_uri()
- self.libvirt_xml = open(template_file).read()
- self.rescue_xml = open(rescue_file).read()
+ self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
self._wrapped_conn = None
self.read_only = read_only
+ def init_host(self):
+ NWFilterFirewall(self._conn).setup_base_nwfilters()
+
@property
def _conn(self):
if not self._wrapped_conn or not self._test_connection():
- logging.debug('Connecting to libvirt: %s' % self.libvirt_uri)
+ logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri,
self.read_only)
return self._wrapped_conn
@@ -144,24 +129,18 @@ class LibvirtConnection(object):
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \
e.get_error_domain() == libvirt.VIR_FROM_REMOTE:
- logging.debug('Connection to libvirt broke')
+ logging.debug(_('Connection to libvirt broke'))
return False
raise
- def get_uri_and_templates(self):
+ def get_uri(self):
if FLAGS.libvirt_type == 'uml':
uri = FLAGS.libvirt_uri or 'uml:///system'
- template_file = FLAGS.libvirt_uml_xml_template
- rescue_file = FLAGS.libvirt_rescue_uml_xml_template
elif FLAGS.libvirt_type == 'xen':
uri = FLAGS.libvirt_uri or 'xen:///'
- template_file = FLAGS.libvirt_xen_xml_template
- rescue_file = FLAGS.libvirt_rescue_xen_xml_template
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
- template_file = FLAGS.libvirt_xml_template
- rescue_file = FLAGS.libvirt_rescue_xml_template
- return uri, template_file, rescue_file
+ return uri
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
@@ -184,14 +163,12 @@ class LibvirtConnection(object):
except Exception as _err:
pass
# If the instance is already terminated, we're still happy
- d = defer.Deferred()
- if cleanup:
- d.addCallback(lambda _: self._cleanup(instance))
- # FIXME: What does this comment mean?
- # TODO(termie): short-circuit me for tests
- # WE'LL save this for when we do shutdown,
+
+ done = event.Event()
+
+ # We'll save this for when we do shutdown,
# instead of destroy - but destroy returns immediately
- timer = task.LoopingCall(f=None)
+ timer = utils.LoopingCall(f=None)
def _wait_for_shutdown():
try:
@@ -200,26 +177,35 @@ class LibvirtConnection(object):
instance['id'], state)
if state == power_state.SHUTDOWN:
timer.stop()
- d.callback(None)
except Exception:
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
timer.stop()
- d.callback(None)
timer.f = _wait_for_shutdown
- timer.start(interval=0.5, now=True)
- return d
+ timer_done = timer.start(interval=0.5, now=True)
+
+ # NOTE(termie): this is strictly superfluous (we could put the
+ # cleanup code in the timer), but this emulates the
+ # previous model so I am keeping it around until
+ # everything has been vetted a bit
+ def _wait_for_timer():
+ timer_done.wait()
+ if cleanup:
+ self._cleanup(instance)
+ done.send()
+
+ greenthread.spawn(_wait_for_timer)
+ return done
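
The block above is the new concurrency idiom: utils.LoopingCall polls domain state while an eventlet.event.Event stands in for the Deferred that callers used to wait on. A standalone sketch of that Event-plus-poller shape, with a stand-in predicate instead of the real state lookup:

    from eventlet import event, greenthread

    def _instance_is_gone():
        return True  # stand-in for 'domain state == SHUTDOWN'

    done = event.Event()

    def _poll():
        while not _instance_is_gone():
            greenthread.sleep(0.5)
        done.send()  # wakes every greenthread blocked in done.wait()

    greenthread.spawn(_poll)
    done.wait()
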
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- logging.info('instance %s: deleting instance files %s',
+ logging.info(_('instance %s: deleting instance files %s'),
instance['name'], target)
if os.path.exists(target):
shutil.rmtree(target)
- @defer.inlineCallbacks
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
virt_dom = self._conn.lookupByName(instance_name)
@@ -230,7 +216,6 @@ class LibvirtConnection(object):
<target dev='%s' bus='virtio'/>
</disk>""" % (device_path, mount_device)
virt_dom.attachDevice(xml)
- yield
def _get_disk_xml(self, xml, device):
"""Returns the xml for the disk mounted at device"""
@@ -252,26 +237,21 @@ class LibvirtConnection(object):
if doc != None:
doc.freeDoc()
- @defer.inlineCallbacks
@exception.wrap_exception
def detach_volume(self, instance_name, mountpoint):
virt_dom = self._conn.lookupByName(instance_name)
mount_device = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
- raise exception.NotFound("No disk at %s" % mount_device)
+ raise exception.NotFound(_("No disk at %s") % mount_device)
virt_dom.detachDevice(xml)
- yield
- @defer.inlineCallbacks
@exception.wrap_exception
def reboot(self, instance):
- yield self.destroy(instance, False)
+ self.destroy(instance, False)
xml = self.to_xml(instance)
- yield self._conn.createXML(xml, 0)
-
- d = defer.Deferred()
- timer = task.LoopingCall(f=None)
+ self._conn.createXML(xml, 0)
+ timer = utils.LoopingCall(f=None)
def _wait_for_reboot():
try:
@@ -279,64 +259,62 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug('instance %s: rebooted', instance['name'])
+ logging.debug(_('instance %s: rebooted'), instance['name'])
timer.stop()
- d.callback(None)
except Exception, exn:
- logging.error('_wait_for_reboot failed: %s', exn)
+ logging.error(_('_wait_for_reboot failed: %s'), exn)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
timer.stop()
- d.callback(None)
timer.f = _wait_for_reboot
- timer.start(interval=0.5, now=True)
- yield d
+ return timer.start(interval=0.5, now=True)
+
+ @exception.wrap_exception
+ def pause(self, instance, callback):
+ raise exception.APIError("pause not supported for libvirt.")
+
+ @exception.wrap_exception
+ def unpause(self, instance, callback):
+ raise exception.APIError("unpause not supported for libvirt.")
- @defer.inlineCallbacks
@exception.wrap_exception
def rescue(self, instance):
- yield self.destroy(instance, False)
+ self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
rescue_images = {'image_id': FLAGS.rescue_image_id,
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
- yield self._create_image(instance, xml, 'rescue-', rescue_images)
- yield self._conn.createXML(xml, 0)
+ self._create_image(instance, xml, 'rescue-', rescue_images)
+ self._conn.createXML(xml, 0)
- d = defer.Deferred()
- timer = task.LoopingCall(f=None)
+ timer = utils.LoopingCall(f=None)
def _wait_for_rescue():
try:
state = self.get_info(instance['name'])['state']
db.instance_set_state(None, instance['id'], state)
if state == power_state.RUNNING:
- logging.debug('instance %s: rescued', instance['name'])
+ logging.debug(_('instance %s: rescued'), instance['name'])
timer.stop()
- d.callback(None)
except Exception, exn:
- logging.error('_wait_for_rescue failed: %s', exn)
+ logging.error(_('_wait_for_rescue failed: %s'), exn)
db.instance_set_state(None,
instance['id'],
power_state.SHUTDOWN)
timer.stop()
- d.callback(None)
timer.f = _wait_for_rescue
- timer.start(interval=0.5, now=True)
- yield d
+ return timer.start(interval=0.5, now=True)
- @defer.inlineCallbacks
@exception.wrap_exception
def unrescue(self, instance):
# NOTE(vish): Because reboot destroys and recreates an instance using
# the normal xml file, we can just call reboot here
- yield self.reboot(instance)
+ self.reboot(instance)
- @defer.inlineCallbacks
@exception.wrap_exception
def spawn(self, instance):
xml = self.to_xml(instance)
@@ -344,14 +322,12 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
- yield NWFilterFirewall(self._conn).\
- setup_nwfilters_for_instance(instance)
- yield self._create_image(instance, xml)
- yield self._conn.createXML(xml, 0)
- logging.debug("instance %s: is running", instance['name'])
+ NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
+ self._create_image(instance, xml)
+ self._conn.createXML(xml, 0)
+ logging.debug(_("instance %s: is running"), instance['name'])
- local_d = defer.Deferred()
- timer = task.LoopingCall(f=None)
+ timer = utils.LoopingCall(f=None)
def _wait_for_boot():
try:
@@ -359,36 +335,33 @@ class LibvirtConnection(object):
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- logging.debug('instance %s: booted', instance['name'])
+ logging.debug(_('instance %s: booted'), instance['name'])
timer.stop()
- local_d.callback(None)
except:
- logging.exception('instance %s: failed to boot',
+ logging.exception(_('instance %s: failed to boot'),
instance['name'])
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
timer.stop()
- local_d.callback(None)
+
timer.f = _wait_for_boot
- timer.start(interval=0.5, now=True)
- yield local_d
+ return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
logging.info('virsh said: %r' % (virsh_output,))
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- logging.info('cool, it\'s a device')
- d = process.simple_execute("sudo dd if=%s iflag=nonblock" %
- virsh_output, check_exit_code=False)
- d.addCallback(lambda r: r[0])
- return d
+ logging.info(_('cool, it\'s a device'))
+ out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
+ virsh_output, check_exit_code=False)
+ return out
else:
return ''
def _append_to_file(self, data, fpath):
- logging.info('data: %r, fpath: %r' % (data, fpath))
+ logging.info(_('data: %r, fpath: %r') % (data, fpath))
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -403,21 +376,20 @@ class LibvirtConnection(object):
def get_console_output(self, instance):
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
- d = process.simple_execute('sudo chown %d %s' % (os.getuid(),
- console_log))
+
+ utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+
if FLAGS.libvirt_type == 'xen':
- # Xen is spethial
- d.addCallback(lambda _:
- process.simple_execute("virsh ttyconsole %s" %
- instance['name']))
- d.addCallback(self._flush_xen_console)
- d.addCallback(self._append_to_file, console_log)
+ # Xen is special
+ virsh_output = utils.execute("virsh ttyconsole %s" %
+ instance['name'])
+ data = self._flush_xen_console(virsh_output)
+ fpath = self._append_to_file(data, console_log)
else:
- d.addCallback(lambda _: defer.succeed(console_log))
- d.addCallback(self._dump_file)
- return d
+ fpath = console_log
+
+ return self._dump_file(fpath)
- @defer.inlineCallbacks
def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
# syntactic nicety
basepath = lambda fname = '', prefix = prefix: os.path.join(
@@ -426,12 +398,12 @@ class LibvirtConnection(object):
prefix + fname)
# ensure directories exist and are writable
- yield process.simple_execute('mkdir -p %s' % basepath(prefix=''))
- yield process.simple_execute('chmod 0777 %s' % basepath(prefix=''))
+ utils.execute('mkdir -p %s' % basepath(prefix=''))
+ utils.execute('chmod 0777 %s' % basepath(prefix=''))
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
- logging.info('instance %s: Creating image', inst['name'])
+ logging.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
@@ -448,19 +420,29 @@ class LibvirtConnection(object):
'kernel_id': inst['kernel_id'],
'ramdisk_id': inst['ramdisk_id']}
if not os.path.exists(basepath('disk')):
- yield images.fetch(inst.image_id, basepath('disk-raw'), user,
- project)
- if not os.path.exists(basepath('kernel')):
- yield images.fetch(inst.kernel_id, basepath('kernel'), user,
- project)
- if not os.path.exists(basepath('ramdisk')):
- yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
- project)
-
- execute = lambda cmd, process_input = None, check_exit_code = True: \
- process.simple_execute(cmd=cmd,
- process_input=process_input,
- check_exit_code=check_exit_code)
+ images.fetch(inst.image_id, basepath('disk-raw'), user,
+ project)
+
+ if inst['kernel_id']:
+ if not os.path.exists(basepath('kernel')):
+ images.fetch(inst['kernel_id'], basepath('kernel'),
+ user, project)
+ if inst['ramdisk_id']:
+ if not os.path.exists(basepath('ramdisk')):
+ images.fetch(inst['ramdisk_id'], basepath('ramdisk'),
+ user, project)
+
+ def execute(cmd, process_input=None, check_exit_code=True):
+ return utils.execute(cmd=cmd,
+ process_input=process_input,
+ check_exit_code=check_exit_code)
+
+ # For now, we assume that if we're not using a kernel, we're using a
+ # partitioned disk image where the target partition is the first
+ # partition
+ target_partition = None
+ if not inst['kernel_id']:
+ target_partition = "1"
key = str(inst['key_data'])
net = None
@@ -477,16 +459,24 @@ class LibvirtConnection(object):
'dns': network_ref['dns']}
if key or net:
if key:
- logging.info('instance %s: injecting key into image %s',
+ logging.info(_('instance %s: injecting key into image %s'),
inst['name'], inst.image_id)
if net:
- logging.info('instance %s: injecting net into image %s',
- inst['name'], inst.image_id)
- yield disk.inject_data(basepath('disk-raw'), key, net,
- execute=execute)
-
- if os.path.exists(basepath('disk')):
- yield process.simple_execute('rm -f %s' % basepath('disk'))
+ logging.info(_('instance %s: injecting net into image %s'),
+ inst['name'], inst.image_id)
+ try:
+ disk.inject_data(basepath('disk-raw'), key, net,
+ partition=target_partition,
+ execute=execute)
+ except Exception as e:
+ # This could be a windows image, or a vmdk format disk
+ logging.warn(_('instance %s: ignoring error injecting data'
+ ' into image %s (%s)'),
+ inst['name'], inst.image_id, e)
+
+ if inst['kernel_id']:
+ if os.path.exists(basepath('disk')):
+ utils.execute('rm -f %s' % basepath('disk'))
local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type]
['local_gb']
@@ -495,18 +485,23 @@ class LibvirtConnection(object):
resize = True
if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
resize = False
- yield disk.partition(basepath('disk-raw'), basepath('disk'),
- local_bytes, resize, execute=execute)
+
+ if inst['kernel_id']:
+ disk.partition(basepath('disk-raw'), basepath('disk'),
+ local_bytes, resize, execute=execute)
+ else:
+ os.rename(basepath('disk-raw'), basepath('disk'))
+ disk.extend(basepath('disk'), local_bytes, execute=execute)
if FLAGS.libvirt_type == 'uml':
- yield process.simple_execute('sudo chown root %s' %
- basepath('disk'))
+ utils.execute('sudo chown root %s' % basepath('disk'))
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
- logging.debug('instance %s: starting toXML method', instance['name'])
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ logging.debug(_('instance %s: starting toXML method'),
+ instance['name'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
# FIXME(vish): stick this in db
instance_type = instance['instance_type']
instance_type = instance_types.INSTANCE_TYPES[instance_type]
@@ -514,6 +509,15 @@ class LibvirtConnection(object):
instance['id'])
# Assume that the gateway also acts as the dhcp server.
dhcp_server = network['gateway']
+
+ if FLAGS.allow_project_net_traffic:
+ net, mask = _get_net_and_mask(network['cidr'])
+ extra_params = ("<parameter name=\"PROJNET\" value=\"%s\" />\n"
+ "<parameter name=\"PROJMASK\" value=\"%s\" />\n"
+ ) % (net, mask)
+ else:
+ extra_params = "\n"
+
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
'basepath': os.path.join(FLAGS.instances_path,
@@ -523,20 +527,30 @@ class LibvirtConnection(object):
'bridge_name': network['bridge'],
'mac_address': instance['mac_address'],
'ip_address': ip_address,
- 'dhcp_server': dhcp_server}
- if rescue:
- libvirt_xml = self.rescue_xml % xml_info
- else:
- libvirt_xml = self.libvirt_xml % xml_info
- logging.debug('instance %s: finished toXML method', instance['name'])
+ 'dhcp_server': dhcp_server,
+ 'extra_params': extra_params,
+ 'rescue': rescue}
+ if not rescue:
+ if instance['kernel_id']:
+ xml_info['kernel'] = xml_info['basepath'] + "/kernel"
- return libvirt_xml
+ if instance['ramdisk_id']:
+ xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
+
+ xml_info['disk'] = xml_info['basepath'] + "/disk"
+
+ xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
+ logging.debug(_('instance %s: finished toXML method'),
+ instance['name'])
+
+ return xml
def get_info(self, instance_name):
try:
virt_dom = self._conn.lookupByName(instance_name)
except:
- raise exception.NotFound("Instance %s not found" % instance_name)
+ raise exception.NotFound(_("Instance %s not found")
+ % instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -722,6 +736,14 @@ class NWFilterFirewall(object):
</rule>
</filter>'''
+ nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
+ <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
+ <filterref filter='allow-dhcp-server'/>
+ <filterref filter='nova-allow-dhcp-server'/>
+ <filterref filter='nova-base-ipv4'/>
+ <filterref filter='nova-base-ipv6'/>
+ </filter>'''
+
def nova_base_ipv4_filter(self):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
@@ -746,27 +768,31 @@ class NWFilterFirewall(object):
retval += '</filter>'
return retval
- def nova_project_filter(self, project, net, mask):
- retval = "<filter name='nova-project-%s' chain='ipv4'>" % project
+ def nova_project_filter(self):
+ retval = "<filter name='nova-project' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
retval += """<rule action='accept' direction='in' priority='200'>
- <%s srcipaddr='%s' srcipmask='%s' />
- </rule>""" % (protocol, net, mask)
+ <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
+ </rule>""" % protocol
retval += '</filter>'
return retval
def _define_filter(self, xml):
if callable(xml):
xml = xml()
- d = threads.deferToThread(self._conn.nwfilterDefineXML, xml)
- return d
- @staticmethod
- def _get_net_and_mask(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.netmask())
+ # execute in a native thread and block current greenthread until done
+ tpool.execute(self._conn.nwfilterDefineXML, xml)
+
+ def setup_base_nwfilters(self):
+ self._define_filter(self.nova_base_ipv4_filter)
+ self._define_filter(self.nova_base_ipv6_filter)
+ self._define_filter(self.nova_dhcp_filter)
+ self._define_filter(self.nova_base_filter)
+ self._define_filter(self.nova_vpn_filter)
+ if FLAGS.allow_project_net_traffic:
+ self._define_filter(self.nova_project_filter)
- @defer.inlineCallbacks
def setup_nwfilters_for_instance(self, instance):
"""
Creates an NWFilter for the given instance. In the process,
@@ -774,35 +800,25 @@ class NWFilterFirewall(object):
the base filter are all in place.
"""
- yield self._define_filter(self.nova_base_ipv4_filter)
- yield self._define_filter(self.nova_base_ipv6_filter)
- yield self._define_filter(self.nova_dhcp_filter)
- yield self._define_filter(self.nova_base_filter)
+ nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n"
+ ) % instance['name']
- nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \
- " <filterref filter='nova-base' />\n" % \
- instance['name']
+ if instance['image_id'] == FLAGS.vpn_image_id:
+ nwfilter_xml += " <filterref filter='nova-vpn' />\n"
+ else:
+ nwfilter_xml += " <filterref filter='nova-base' />\n"
if FLAGS.allow_project_net_traffic:
- network_ref = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
- net, mask = self._get_net_and_mask(network_ref['cidr'])
- project_filter = self.nova_project_filter(instance['project_id'],
- net, mask)
- yield self._define_filter(project_filter)
-
- nwfilter_xml += " <filterref filter='nova-project-%s' />\n" % \
- instance['project_id']
+ nwfilter_xml += " <filterref filter='nova-project' />\n"
for security_group in instance.security_groups:
- yield self.ensure_security_group_filter(security_group['id'])
+ self.ensure_security_group_filter(security_group['id'])
- nwfilter_xml += " <filterref filter='nova-secgroup-%d' />\n" % \
- security_group['id']
+ nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
+ ) % security_group['id']
nwfilter_xml += "</filter>"
- yield self._define_filter(nwfilter_xml)
- return
+ self._define_filter(nwfilter_xml)
def ensure_security_group_filter(self, security_group_id):
return self._define_filter(
@@ -815,7 +831,7 @@ class NWFilterFirewall(object):
for rule in security_group.rules:
rule_xml += "<rule action='accept' direction='in' priority='300'>"
if rule.cidr:
- net, mask = self._get_net_and_mask(rule.cidr)
+ net, mask = _get_net_and_mask(rule.cidr)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
(rule.protocol, net, mask)
if rule.protocol in ['tcp', 'udp']:
diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py
index 3d598c463..c75162f08 100644
--- a/nova/virt/xenapi/__init__.py
+++ b/nova/virt/xenapi/__init__.py
@@ -13,3 +13,18 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+"""
+:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI
+==================================================================
+"""
+
+
+class HelperBase(object):
+ """
+ The base for helper classes. This adds the XenAPI class attribute
+ """
+ XenAPI = None
+
+ def __init__(self):
+ return
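
HelperBase exists so the XenAPI SDK stays unimported until the xenapi driver is actually selected: helper classes carry a XenAPI class attribute that the driver binds at startup (VMOps.__init__ below does VMHelper.XenAPI = self.XenAPI). A sketch of the late-binding pattern:

    class HelperBase(object):
        XenAPI = None  # bound at runtime, not at import time

    class VMHelper(HelperBase):
        @classmethod
        def sdk(cls):
            return cls.XenAPI  # helpers reach the SDK via the attribute

    def wire_up(xenapi_module):
        # e.g. xenapi_module = __import__('XenAPI') once the driver loads
        VMHelper.XenAPI = xenapi_module
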
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
new file mode 100644
index 000000000..7a6c9ee71
--- /dev/null
+++ b/nova/virt/xenapi/fake.py
@@ -0,0 +1,388 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+#============================================================================
+#
+# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
+# interface included in the Python distribution.
+#
+# Copyright (c) 1999-2002 by Secret Labs AB
+# Copyright (c) 1999-2002 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+
+"""
+A fake XenAPI SDK.
+"""
+
+
+import datetime
+import logging
+import uuid
+
+from nova import exception
+
+
+_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
+ 'PBD', 'VDI', 'VIF', 'VM', 'task']
+
+_db_content = {}
+
+
+def reset():
+ for c in _CLASSES:
+ _db_content[c] = {}
+ create_host('fake')
+
+
+def create_host(name_label):
+ return _create_object('host', {
+ 'name_label': name_label,
+ })
+
+
+def create_network(name_label, bridge):
+ return _create_object('network', {
+ 'name_label': name_label,
+ 'bridge': bridge,
+ })
+
+
+def create_vm(name_label, status,
+ is_a_template=False, is_control_domain=False):
+ return _create_object('VM', {
+ 'name_label': name_label,
+ 'power-state': status,
+ 'is_a_template': is_a_template,
+ 'is_control_domain': is_control_domain,
+ })
+
+
+def create_vdi(name_label, read_only, sr_ref, sharable):
+ return _create_object('VDI', {
+ 'name_label': name_label,
+ 'read_only': read_only,
+ 'SR': sr_ref,
+ 'type': '',
+ 'name_description': '',
+ 'sharable': sharable,
+ 'other_config': {},
+ 'location': '',
+ 'xenstore_data': '',
+ 'sm_config': {},
+ 'VBDs': {},
+ })
+
+
+def create_pbd(config, sr_ref, attached):
+ return _create_object('PBD', {
+ 'device-config': config,
+ 'SR': sr_ref,
+ 'currently-attached': attached,
+ })
+
+
+def create_task(name_label):
+ return _create_object('task', {
+ 'name_label': name_label,
+ 'status': 'pending',
+ })
+
+
+def _create_object(table, obj):
+ ref = str(uuid.uuid4())
+ obj['uuid'] = str(uuid.uuid4())
+ _db_content[table][ref] = obj
+ return ref
+
+
+def _create_sr(table, obj):
+ sr_type = obj[6]
+ # Forces fake to support iscsi only
+ if sr_type != 'iscsi':
+ raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
+ sr_ref = _create_object(table, obj[2])
+ vdi_ref = create_vdi('', False, sr_ref, False)
+ pbd_ref = create_pbd('', sr_ref, True)
+ _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
+ _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+ _db_content['VDI'][vdi_ref]['SR'] = sr_ref
+ _db_content['PBD'][pbd_ref]['SR'] = sr_ref
+ return sr_ref
+
+
+def get_all(table):
+ return _db_content[table].keys()
+
+
+def get_all_records(table):
+ return _db_content[table]
+
+
+def get_record(table, ref):
+ if ref in _db_content[table]:
+ return _db_content[table].get(ref)
+ else:
+ raise Failure(['HANDLE_INVALID', table, ref])
+
+
+def check_for_session_leaks():
+ if len(_db_content['session']) > 0:
+ raise exception.Error('Sessions have leaked: %s' %
+ _db_content['session'])
+
+
+class Failure(Exception):
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ try:
+ return str(self.details)
+ except Exception, exc:
+ return "XenAPI Fake Failure: %s" % str(self.details)
+
+ def _details_map(self):
+ return dict([(str(i), self.details[i])
+ for i in range(len(self.details))])
+
+
+class SessionBase(object):
+ """
+ Base class for Fake Sessions
+ """
+
+ def __init__(self, uri):
+ self._session = None
+
+ def xenapi_request(self, methodname, params):
+ if methodname.startswith('login'):
+ self._login(methodname, params)
+ return None
+ elif methodname == 'logout' or methodname == 'session.logout':
+ self._logout()
+ return None
+ else:
+ full_params = (self._session,) + params
+ meth = getattr(self, methodname, None)
+ if meth is None:
+ logging.warn('Raising NotImplemented')
+ raise NotImplementedError(
+ 'xenapi.fake does not have an implementation for %s' %
+ methodname)
+ return meth(*full_params)
+
+ def _login(self, method, params):
+ self._session = str(uuid.uuid4())
+ _db_content['session'][self._session] = {
+ 'uuid': str(uuid.uuid4()),
+ 'this_host': _db_content['host'].keys()[0],
+ }
+
+ def _logout(self):
+ s = self._session
+ self._session = None
+ if s not in _db_content['session']:
+ raise exception.Error(
+ "Logging out a session that is invalid or already logged "
+ "out: %s" % s)
+ del _db_content['session'][s]
+
+ def __getattr__(self, name):
+ if name == 'handle':
+ return self._session
+ elif name == 'xenapi':
+ return _Dispatcher(self.xenapi_request, None)
+ elif name.startswith('login') or name.startswith('slave_local'):
+ return lambda *params: self._login(name, params)
+ elif name.startswith('Async'):
+ return lambda *params: self._async(name, params)
+ elif '.' in name:
+ impl = getattr(self, name.replace('.', '_'))
+ if impl is not None:
+ def callit(*params):
+ logging.warn('Calling %s %s', name, impl)
+ self._check_session(params)
+ return impl(*params)
+ return callit
+ if self._is_gettersetter(name, True):
+ logging.warn('Calling getter %s', name)
+ return lambda *params: self._getter(name, params)
+ elif self._is_create(name):
+ return lambda *params: self._create(name, params)
+ else:
+ return None
+
+ def _is_gettersetter(self, name, getter):
+ bits = name.split('.')
+ return (len(bits) == 2 and
+ bits[0] in _CLASSES and
+ bits[1].startswith(getter and 'get_' or 'set_'))
+
+ def _is_create(self, name):
+ bits = name.split('.')
+ return (len(bits) == 2 and
+ bits[0] in _CLASSES and
+ bits[1] == 'create')
+
+ def _getter(self, name, params):
+ self._check_session(params)
+ (cls, func) = name.split('.')
+
+ if func == 'get_all':
+ self._check_arg_count(params, 1)
+ return get_all(cls)
+
+ if func == 'get_all_records':
+ self._check_arg_count(params, 1)
+ return get_all_records(cls)
+
+ if func == 'get_record':
+ self._check_arg_count(params, 2)
+ return get_record(cls, params[1])
+
+ if (func == 'get_by_name_label' or
+ func == 'get_by_uuid'):
+ self._check_arg_count(params, 2)
+ return self._get_by_field(
+ _db_content[cls], func[len('get_by_'):], params[1])
+
+ if len(params) == 2:
+ field = func[len('get_'):]
+ ref = params[1]
+
+ if (ref in _db_content[cls] and
+ field in _db_content[cls][ref]):
+ return _db_content[cls][ref][field]
+
+ logging.error('Raising NotImplemented')
+ raise NotImplementedError(
+ 'xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments' % name)
+
+ def _setter(self, name, params):
+ self._check_session(params)
+ (cls, func) = name.split('.')
+
+ if len(params) == 3:
+ field = func[len('set_'):]
+ ref = params[1]
+ val = params[2]
+
+ if (ref in _db_content[cls] and
+ field in _db_content[cls][ref]):
+ _db_content[cls][ref][field] = val
+
+ logging.warn('Raising NotImplemented')
+ raise NotImplementedError(
+ 'xenapi.fake does not have an implementation for %s or it has '
+ 'been called with the wrong number of arguments or the database '
+ 'is missing that field' % name)
+
+ def _create(self, name, params):
+ self._check_session(params)
+ is_sr_create = name == 'SR.create'
+ # Storage Repositories have a different API
+ expected = is_sr_create and 10 or 2
+ self._check_arg_count(params, expected)
+ (cls, _) = name.split('.')
+ ref = is_sr_create and \
+ _create_sr(cls, params) or _create_object(cls, params[1])
+ obj = get_record(cls, ref)
+
+ # Add RO fields
+ if cls == 'VM':
+ obj['power_state'] = 'Halted'
+
+ return ref
+
+ def _async(self, name, params):
+ task_ref = create_task(name)
+ task = _db_content['task'][task_ref]
+ func = name[len('Async.'):]
+ try:
+ task['result'] = self.xenapi_request(func, params[1:])
+ task['status'] = 'success'
+ except Failure, exc:
+ task['error_info'] = exc.details
+ task['status'] = 'failed'
+ task['finished'] = datetime.datetime.now()
+ return task_ref
+
+ def _check_session(self, params):
+ if (self._session is None or
+ self._session not in _db_content['session']):
+ raise Failure(['HANDLE_INVALID', 'session', self._session])
+ if len(params) == 0 or params[0] != self._session:
+ logging.warn('Raising NotImplemented')
+ raise NotImplementedError('Call to XenAPI without using .xenapi')
+
+ def _check_arg_count(self, params, expected):
+ actual = len(params)
+ if actual != expected:
+ raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
+ expected, actual])
+
+ def _get_by_field(self, recs, k, v):
+ result = []
+ for ref, rec in recs.iteritems():
+ if rec.get(k) == v:
+ result.append(ref)
+ return result
+
+
+# Based upon _Method from xmlrpclib.
+class _Dispatcher:
+ def __init__(self, send, name):
+ self.__send = send
+ self.__name = name
+
+ def __repr__(self):
+ if self.__name:
+ return '<xenapi.fake._Dispatcher for %s>' % self.__name
+ else:
+ return '<xenapi.fake._Dispatcher>'
+
+ def __getattr__(self, name):
+ if self.__name is None:
+ return _Dispatcher(self.__send, name)
+ else:
+ return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
+
+ def __call__(self, *args):
+ return self.__send(self.__name, args)
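
With the dispatch machinery above, tests can drive the fake exactly the way the real driver drives xapi. A hedged usage sketch (assuming the module imports as nova.virt.xenapi.fake):

    from nova.virt.xenapi import fake

    fake.reset()                                 # empty tables + one host
    session = fake.SessionBase('http://fake')    # the URI is ignored
    session.login_with_password('user', 'pass')  # any login_* name works

    # _Dispatcher turns attribute access into xenapi_request() calls:
    vm_ref = session.xenapi.VM.create({'name_label': 'test-vm'})
    rec = session.xenapi.VM.get_record(vm_ref)
    assert rec['name_label'] == 'test-vm'
    assert rec['power_state'] == 'Halted'  # RO field added by _create()
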
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
index 8cb4cce3a..c0406d8f0 100644
--- a/nova/virt/xenapi/network_utils.py
+++ b/nova/virt/xenapi/network_utils.py
@@ -20,26 +20,24 @@ records and their attributes like bridges, PIFs, QoS, as well as
their lookup functions.
"""
-from twisted.internet import defer
+from nova.virt.xenapi import HelperBase
-class NetworkHelper():
+
+class NetworkHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
- def __init__(self):
- return
@classmethod
- @defer.inlineCallbacks
def find_network_with_bridge(cls, session, bridge):
- """ Return the network on which the bridge is attached, if found """
+ """Return the network on which the bridge is attached, if found."""
expr = 'field "bridge" = "%s"' % bridge
- networks = yield session.call_xenapi('network.get_all_records_where',
- expr)
+ networks = session.call_xenapi('network.get_all_records_where', expr)
if len(networks) == 1:
- defer.returnValue(networks.keys()[0])
+ return networks.keys()[0]
elif len(networks) > 1:
- raise Exception('Found non-unique network for bridge %s' % bridge)
+ raise Exception(_('Found non-unique network'
+ ' for bridge %s') % bridge)
else:
- raise Exception('Found no network for bridge %s' % bridge)
+ raise Exception(_('Found no network for bridge %s') % bridge)
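
The lookup above encodes a uniqueness contract: exactly one network per bridge, otherwise raise. A standalone restatement of that contract against a plain dict standing in for the XenAPI record set:

    def find_network_with_bridge(networks, bridge):
        # networks: {ref: record}, the assumed shape of
        # network.get_all_records_where results.
        matches = [ref for ref, rec in networks.items()
                   if rec.get('bridge') == bridge]
        if len(matches) == 1:
            return matches[0]
        elif matches:
            raise Exception('Found non-unique network for bridge %s' % bridge)
        else:
            raise Exception('Found no network for bridge %s' % bridge)

    assert find_network_with_bridge(
        {'ref1': {'bridge': 'xapi0'}}, 'xapi0') == 'ref1'
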
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 77edb576e..89e02c917 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -21,17 +21,17 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
import logging
import urllib
-
-from twisted.internet import defer
from xml.dom import minidom
+from nova import exception
from nova import flags
-from nova import utils
-
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import images
+from nova.virt.xenapi import HelperBase
+from nova.virt.xenapi.volume_utils import StorageError
+
FLAGS = flags.FLAGS
@@ -42,33 +42,16 @@ XENAPI_POWER_STATE = {
'Suspended': power_state.SHUTDOWN, # FIXME
'Crashed': power_state.CRASHED}
-XenAPI = None
-
-class VMHelper():
+class VMHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
- def __init__(self):
- return
@classmethod
- def late_import(cls):
- """
- Load the XenAPI module in for helper class, if required.
- This is to avoid to install the XenAPI library when other
- hypervisors are used
- """
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
-
- @classmethod
- @defer.inlineCallbacks
def create_vm(cls, session, instance, kernel, ramdisk):
"""Create a VM record. Returns a Deferred that gives the new
VM reference."""
-
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
@@ -101,17 +84,15 @@ class VMHelper():
'user_version': '0',
'other_config': {},
}
- logging.debug('Created VM %s...', instance.name)
- vm_ref = yield session.call_xenapi('VM.create', rec)
- logging.debug('Created VM %s as %s.', instance.name, vm_ref)
- defer.returnValue(vm_ref)
+ logging.debug(_('Created VM %s...'), instance.name)
+ vm_ref = session.call_xenapi('VM.create', rec)
+ logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ return vm_ref
@classmethod
- @defer.inlineCallbacks
def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
-
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
@@ -125,18 +106,53 @@ class VMHelper():
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
- vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec)
- logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
+ logging.debug(_('Creating VBD for VM %s, VDI %s ... '),
+ vm_ref, vdi_ref)
+ vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
+ logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
vdi_ref)
- defer.returnValue(vbd_ref)
+ return vbd_ref
+
+ @classmethod
+ def find_vbd_by_number(cls, session, vm_ref, number):
+ """Get the VBD reference from the device number"""
+ vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
+ if vbds:
+ for vbd in vbds:
+ try:
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ if vbd_rec['userdevice'] == str(number):
+ return vbd
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('VBD not found in instance %s') % vm_ref)
+
+ @classmethod
+ def unplug_vbd(cls, session, vbd_ref):
+ """Unplug VBD from VM"""
+ try:
+ vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
+ raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
+
+ @classmethod
+ def destroy_vbd(cls, session, vbd_ref):
+ """Destroy VBD from host database"""
+ try:
+ task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
+ #FIXME(armando): find a solution to missing instance_id
+ #with Josh Kearney
+ session.wait_for_task(0, task)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
- @defer.inlineCallbacks
def create_vif(cls, session, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
-
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['network'] = network_ref
@@ -146,15 +162,14 @@ class VMHelper():
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
+ logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
network_ref)
- vif_ref = yield session.call_xenapi('VIF.create', vif_rec)
- logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
+ vif_ref = session.call_xenapi('VIF.create', vif_rec)
+ logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
vm_ref, network_ref)
- defer.returnValue(vif_ref)
+ return vif_ref
@classmethod
- @defer.inlineCallbacks
def fetch_image(cls, session, image, user, project, use_sr):
"""use_sr: True to put the image as a VDI in an SR, False to place
it on dom0's filesystem. The former is for VM disks, the latter for
@@ -163,7 +178,7 @@ class VMHelper():
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
- logging.debug("Asking xapi to fetch %s as %s", url, access)
+ logging.debug(_("Asking xapi to fetch %s as %s"), url, access)
fn = use_sr and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -171,37 +186,27 @@ class VMHelper():
args['password'] = user.secret
if use_sr:
args['add_partition'] = 'true'
- task = yield session.async_call_plugin('objectstore', fn, args)
- uuid = yield session.wait_for_task(task)
- defer.returnValue(uuid)
+ task = session.async_call_plugin('objectstore', fn, args)
+ #FIXME(armando): find a solution to missing instance_id
+ #with Josh Kearney
+ uuid = session.wait_for_task(0, task)
+ return uuid
@classmethod
- @utils.deferredToThread
def lookup(cls, session, i):
- """ Look the instance i up, and returns it if available """
- return VMHelper.lookup_blocking(session, i)
-
- @classmethod
- def lookup_blocking(cls, session, i):
- """ Synchronous lookup """
+ """Look the instance i up, and returns it if available"""
vms = session.get_xenapi().VM.get_by_name_label(i)
n = len(vms)
if n == 0:
return None
elif n > 1:
- raise Exception('duplicate name found: %s' % i)
+ raise exception.Duplicate(_('duplicate name found: %s') % i)
else:
return vms[0]
@classmethod
- @utils.deferredToThread
def lookup_vm_vdis(cls, session, vm):
- """ Look for the VDIs that are attached to the VM """
- return VMHelper.lookup_vm_vdis_blocking(session, vm)
-
- @classmethod
- def lookup_vm_vdis_blocking(cls, session, vm):
- """ Synchronous lookup_vm_vdis """
+ """Look for the VDIs that are attached to the VM"""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbds = session.get_xenapi().VM.get_VBDs(vm)
@@ -212,8 +217,9 @@ class VMHelper():
vdi = session.get_xenapi().VBD.get_VDI(vbd)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi)
- logging.debug('VDI %s is still available', record['uuid'])
- except XenAPI.Failure, exc:
+ logging.debug(_('VDI %s is still available'),
+ record['uuid'])
+ except cls.XenAPI.Failure, exc:
logging.warn(exc)
else:
vdis.append(vdi)
@@ -224,6 +230,7 @@ class VMHelper():
@classmethod
def compile_info(cls, record):
+ """Fill record with VM status information"""
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -236,11 +243,7 @@ class VMHelper():
try:
host = session.get_xenapi_host()
host_ip = session.get_xenapi().host.get_record(host)["address"]
- metrics = session.get_xenapi().VM_guest_metrics.get_record(
- record["guest_metrics"])
- diags = {
- "Kernel": metrics["os_version"]["uname"],
- "Distro": metrics["os_version"]["name"]}
+ diags = {}
xml = get_rrd(host_ip, record["uuid"])
if xml:
rrd = minidom.parseString(xml)
@@ -251,7 +254,7 @@ class VMHelper():
# Name and Value
diags[ref[0].firstChild.data] = ref[6].firstChild.data
return diags
- except XenAPI.Failure as e:
+ except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 9bfd07267..39bb0f1e8 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -20,127 +20,177 @@ Management class for VM-related functions (spawn, reboot, etc).
import logging
-from twisted.internet import defer
-
from nova import db
from nova import context
+from nova import exception
+from nova import utils
from nova.auth.manager import AuthManager
+from nova.compute import power_state
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
-XenAPI = None
-
class VMOps(object):
"""
Management class for VM-related tasks
"""
+
def __init__(self, session):
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
+ self.XenAPI = session.get_imported_xenapi()
self._session = session
- # Load XenAPI module in the helper class
- VMHelper.late_import()
+ VMHelper.XenAPI = self.XenAPI
def list_instances(self):
- """ List VM instances """
- return [self._session.get_xenapi().VM.get_name_label(vm) \
- for vm in self._session.get_xenapi().VM.get_all()]
+ """List VM instances"""
+ vms = []
+ for vm in self._session.get_xenapi().VM.get_all():
+ rec = self._session.get_xenapi().VM.get_record(vm)
+ if not rec["is_a_template"] and not rec["is_control_domain"]:
+ vms.append(rec["name_label"])
+ return vms
- @defer.inlineCallbacks
def spawn(self, instance):
- """ Create VM instance """
- vm = yield VMHelper.lookup(self._session, instance.name)
+ """Create VM instance"""
+ vm = VMHelper.lookup(self._session, instance.name)
if vm is not None:
- raise Exception('Attempted to create non-unique name %s' %
- instance.name)
+ raise exception.Duplicate(_('Attempted to create'
+ ' non-unique name %s') % instance.name)
- bridge = db.project_get_network(context.get_admin_context(),
- instance.project_id).bridge
+ bridge = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])['bridge']
network_ref = \
- yield NetworkHelper.find_network_with_bridge(self._session, bridge)
+ NetworkHelper.find_network_with_bridge(self._session, bridge)
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- vdi_uuid = yield VMHelper.fetch_image(self._session,
- instance.image_id, user, project, True)
- kernel = yield VMHelper.fetch_image(self._session,
- instance.kernel_id, user, project, False)
- ramdisk = yield VMHelper.fetch_image(self._session,
- instance.ramdisk_id, user, project, False)
- vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- vm_ref = yield VMHelper.create_vm(self._session,
- instance, kernel, ramdisk)
- yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+ vdi_uuid = VMHelper.fetch_image(
+ self._session, instance.image_id, user, project, True)
+ kernel = VMHelper.fetch_image(
+ self._session, instance.kernel_id, user, project, False)
+ ramdisk = VMHelper.fetch_image(
+ self._session, instance.ramdisk_id, user, project, False)
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ vm_ref = VMHelper.create_vm(
+ self._session, instance, kernel, ramdisk)
+ VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
if network_ref:
- yield VMHelper.create_vif(self._session, vm_ref,
- network_ref, instance.mac_address)
- logging.debug('Starting VM %s...', vm_ref)
- yield self._session.call_xenapi('VM.start', vm_ref, False, False)
- logging.info('Spawning VM %s created %s.', instance.name,
+ VMHelper.create_vif(self._session, vm_ref,
+ network_ref, instance.mac_address)
+ logging.debug(_('Starting VM %s...'), vm_ref)
+ self._session.call_xenapi('VM.start', vm_ref, False, False)
+ logging.info(_('Spawning VM %s created %s.'), instance.name,
vm_ref)
- @defer.inlineCallbacks
+ # NOTE(armando): Do we really need to do this in virt?
+ timer = utils.LoopingCall(f=None)
+
+ def _wait_for_boot():
+ try:
+ state = self.get_info(instance['name'])['state']
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'], state)
+ if state == power_state.RUNNING:
+ logging.debug(_('Instance %s: booted'), instance['name'])
+ timer.stop()
+ except Exception, exc:
+ logging.warn(exc)
+ logging.exception(_('instance %s: failed to boot'),
+ instance['name'])
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ timer.stop()
+
+ timer.f = _wait_for_boot
+ return timer.start(interval=0.5, now=True)
+
def reboot(self, instance):
- """ Reboot VM instance """
+ """Reboot VM instance"""
instance_name = instance.name
- vm = yield VMHelper.lookup(self._session, instance_name)
+ vm = VMHelper.lookup(self._session, instance_name)
if vm is None:
- raise Exception('instance not present %s' % instance_name)
- task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm)
- yield self._session.wait_for_task(task)
+            raise exception.NotFound(_('Instance not'
+                                       ' found %s') % instance_name)
+ task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
+ self._session.wait_for_task(instance.id, task)
- @defer.inlineCallbacks
def destroy(self, instance):
- """ Destroy VM instance """
- vm = yield VMHelper.lookup(self._session, instance.name)
+ """Destroy VM instance"""
+ vm = VMHelper.lookup(self._session, instance.name)
if vm is None:
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
- defer.returnValue(None)
+ return
# Get the VDIs related to the VM
- vdis = yield VMHelper.lookup_vm_vdis(self._session, vm)
+ vdis = VMHelper.lookup_vm_vdis(self._session, vm)
try:
- task = yield self._session.call_xenapi('Async.VM.hard_shutdown',
+ task = self._session.call_xenapi('Async.VM.hard_shutdown',
vm)
- yield self._session.wait_for_task(task)
+ self._session.wait_for_task(instance.id, task)
-        except XenAPI.Failure, exc:
+        except self.XenAPI.Failure, exc:
logging.warn(exc)
# Disk clean-up
if vdis:
for vdi in vdis:
try:
- task = yield self._session.call_xenapi('Async.VDI.destroy',
- vdi)
- yield self._session.wait_for_task(task)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+ self._session.wait_for_task(instance.id, task)
-                except XenAPI.Failure, exc:
+                except self.XenAPI.Failure, exc:
logging.warn(exc)
+ # VM Destroy
+ try:
+ task = self._session.call_xenapi('Async.VM.destroy', vm)
+ self._session.wait_for_task(instance.id, task)
+        except self.XenAPI.Failure, exc:
+ logging.warn(exc)
+
+ def _wait_with_callback(self, instance_id, task, callback):
+ ret = None
try:
- task = yield self._session.call_xenapi('Async.VM.destroy', vm)
- yield self._session.wait_for_task(task)
+ ret = self._session.wait_for_task(instance_id, task)
-        except XenAPI.Failure, exc:
+        except self.XenAPI.Failure, exc:
logging.warn(exc)
+ callback(ret)
+
+ def pause(self, instance, callback):
+ """Pause VM instance"""
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ if vm is None:
+ raise exception.NotFound(_('Instance not'
+ ' found %s') % instance_name)
+ task = self._session.call_xenapi('Async.VM.pause', vm)
+ self._wait_with_callback(instance.id, task, callback)
+
+ def unpause(self, instance, callback):
+ """Unpause VM instance"""
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
+ if vm is None:
+ raise exception.NotFound(_('Instance not'
+ ' found %s') % instance_name)
+ task = self._session.call_xenapi('Async.VM.unpause', vm)
+ self._wait_with_callback(instance.id, task, callback)
def get_info(self, instance_id):
- """ Return data about VM instance """
- vm = VMHelper.lookup_blocking(self._session, instance_id)
+ """Return data about VM instance"""
+ vm = VMHelper.lookup(self._session, instance_id)
if vm is None:
- raise Exception('instance not present %s' % instance_id)
+ raise exception.NotFound(_('Instance not'
+ ' found %s') % instance_id)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_info(rec)
- @defer.inlineCallbacks
def get_diagnostics(self, instance_id):
"""Return data about VM diagnostics"""
- vm = yield VMHelper.lookup(self._session, instance_id)
+ vm = VMHelper.lookup(self._session, instance_id)
if vm is None:
- raise Exception("instance not present %s" % instance_id)
- rec = yield self._session.get_xenapi().VM.get_record(vm)
- defer.returnValue(VMHelper.compile_diagnostics(self._session, rec))
+ raise exception.NotFound(_("Instance not found %s") % instance_id)
+ rec = self._session.get_xenapi().VM.get_record(vm)
+ return VMHelper.compile_diagnostics(self._session, rec)
def get_console_output(self, instance):
- """ Return snapshot of console """
+ """Return snapshot of console"""
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
new file mode 100644
index 000000000..a0c0a67d4
--- /dev/null
+++ b/nova/virt/xenapi/volume_utils.py
@@ -0,0 +1,268 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import re
+import string
+import logging
+
+from nova import db
+from nova import context
+from nova import exception
+from nova import flags
+from nova import utils
+from nova.virt.xenapi import HelperBase
+
+FLAGS = flags.FLAGS
+
+
+class StorageError(Exception):
+ """To raise errors related to SR, VDI, PBD, and VBD commands"""
+
+ def __init__(self, message=None):
+ super(StorageError, self).__init__(message)
+
+
+class VolumeHelper(HelperBase):
+ """
+ The class that wraps the helper methods together.
+ """
+
+ @classmethod
+ def create_iscsi_storage(cls, session, info, label, description):
+ """
+ Create an iSCSI storage repository that will be used to mount
+ the volume for the specified instance
+ """
+ sr_ref = session.get_xenapi().SR.get_by_name_label(label)
+ if len(sr_ref) == 0:
+            logging.debug(_('Introducing %s...'), label)
+ record = {}
+ if 'chapuser' in info and 'chappassword' in info:
+ record = {'target': info['targetHost'],
+ 'port': info['targetPort'],
+ 'targetIQN': info['targetIQN'],
+ 'chapuser': info['chapuser'],
+ 'chappassword': info['chappassword']
+ }
+ else:
+ record = {'target': info['targetHost'],
+ 'port': info['targetPort'],
+ 'targetIQN': info['targetIQN']
+ }
+ try:
+ sr_ref = session.get_xenapi().SR.create(
+ session.get_xenapi_host(),
+ record,
+ '0', label, description, 'iscsi', '', False, {})
+                logging.debug(_('Introduced %s as %s.'), label, sr_ref)
+ return sr_ref
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to create Storage Repository'))
+ else:
+ return sr_ref[0]
+
+ @classmethod
+ def find_sr_from_vbd(cls, session, vbd_ref):
+ """Find the SR reference from the VBD reference"""
+ try:
+ vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
+ sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref)
+ return sr_ref
+
+ @classmethod
+ def destroy_iscsi_storage(cls, session, sr_ref):
+ """Forget the SR whilst preserving the state of the disk"""
+ logging.debug("Forgetting SR %s ... ", sr_ref)
+ pbds = []
+ try:
+ pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
+ except cls.XenAPI.Failure, exc:
+            logging.warn(_('Ignoring exception %s when getting PBDs for %s'),
+ exc, sr_ref)
+ for pbd in pbds:
+ try:
+ session.get_xenapi().PBD.unplug(pbd)
+ except cls.XenAPI.Failure, exc:
+                logging.warn(_('Ignoring exception %s when unplugging PBD %s'),
+ exc, pbd)
+ try:
+ session.get_xenapi().SR.forget(sr_ref)
+ logging.debug("Forgetting SR %s done.", sr_ref)
+ except cls.XenAPI.Failure, exc:
+            logging.warn(_('Ignoring exception %s when forgetting SR %s'),
+ exc, sr_ref)
+
+ @classmethod
+ def introduce_vdi(cls, session, sr_ref):
+ """Introduce VDI in the host"""
+ try:
+ vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
+ try:
+ vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+            raise StorageError(_('Unable to get record'
+                                 ' of VDI %s') % vdis[0])
+ else:
+ try:
+ return session.get_xenapi().VDI.introduce(
+ vdi_rec['uuid'],
+ vdi_rec['name_label'],
+ vdi_rec['name_description'],
+ vdi_rec['SR'],
+ vdi_rec['type'],
+ vdi_rec['sharable'],
+ vdi_rec['read_only'],
+ vdi_rec['other_config'],
+ vdi_rec['location'],
+ vdi_rec['xenstore_data'],
+ vdi_rec['sm_config'])
+ except cls.XenAPI.Failure, exc:
+ logging.warn(exc)
+ raise StorageError(_('Unable to introduce VDI for SR %s')
+ % sr_ref)
+
+ @classmethod
+ def parse_volume_info(cls, device_path, mountpoint):
+ """
+ Parse device_path and mountpoint as they can be used by XenAPI.
+ In particular, the mountpoint (e.g. /dev/sdc) must be translated
+ into a numeric literal.
+ FIXME(armando):
+    device_path cannot currently be used as-is, because it does not
+    contain target information. As an interim solution, target details
+    are passed either via Flags or obtained via iscsiadm. The long-term
+    solution is to add a few more fields to the iscsi_target table in
+    the db with the necessary info and modify the iscsi driver to set
+    them.
+ """
+ device_number = VolumeHelper.mountpoint_to_number(mountpoint)
+ volume_id = _get_volume_id(device_path)
+ (iscsi_name, iscsi_portal) = _get_target(volume_id)
+ target_host = _get_target_host(iscsi_portal)
+ target_port = _get_target_port(iscsi_portal)
+ target_iqn = _get_iqn(iscsi_name, volume_id)
+        logging.debug(_('(vol_id,number,host,port,iqn): (%s,%s,%s,%s,%s)'),
+                      volume_id,
+                      device_number,
+                      target_host,
+                      target_port,
+                      target_iqn)
+ if (device_number < 0) or \
+ (volume_id is None) or \
+ (target_host is None) or \
+ (target_iqn is None):
+ raise StorageError(_('Unable to obtain target information %s, %s')
+ % (device_path, mountpoint))
+ volume_info = {}
+ volume_info['deviceNumber'] = device_number
+ volume_info['volumeId'] = volume_id
+ volume_info['targetHost'] = target_host
+ volume_info['targetPort'] = target_port
+ volume_info['targetIQN'] = target_iqn
+ return volume_info
+
+ @classmethod
+ def mountpoint_to_number(cls, mountpoint):
+ """Translate a mountpoint like /dev/sdc into a numeric"""
+ if mountpoint.startswith('/dev/'):
+ mountpoint = mountpoint[5:]
+ if re.match('^[hs]d[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^vd[a-p]$', mountpoint):
+ return (ord(mountpoint[2:3]) - ord('a'))
+ elif re.match('^[0-9]+$', mountpoint):
+ return string.atoi(mountpoint, 10)
+ else:
+            logging.warn(_('Mountpoint cannot be translated: %s'), mountpoint)
+ return -1
+
+
+def _get_volume_id(path):
+ """Retrieve the volume id from device_path"""
+ # n must contain at least the volume_id
+ # /vol- is for remote volumes
+ # -vol- is for local volumes
+ # see compute/manager->setup_compute_volume
+ volume_id = path[path.find('/vol-') + 1:]
+ if volume_id == path:
+ volume_id = path[path.find('-vol-') + 1:].replace('--', '-')
+ return volume_id
+
+
+def _get_target_host(iscsi_string):
+ """Retrieve target host"""
+ if iscsi_string:
+ return iscsi_string[0:iscsi_string.find(':')]
+ elif iscsi_string is None or FLAGS.target_host:
+ return FLAGS.target_host
+
+
+def _get_target_port(iscsi_string):
+ """Retrieve target port"""
+ if iscsi_string:
+ return iscsi_string[iscsi_string.find(':') + 1:]
+ elif iscsi_string is None or FLAGS.target_port:
+ return FLAGS.target_port
+
+
+def _get_iqn(iscsi_string, id):
+ """Retrieve target IQN"""
+ if iscsi_string:
+ return iscsi_string
+ elif iscsi_string is None or FLAGS.iqn_prefix:
+ volume_id = _get_volume_id(id)
+ return '%s:%s' % (FLAGS.iqn_prefix, volume_id)
+
+
+def _get_target(volume_id):
+ """
+ Gets iscsi name and portal from volume name and host.
+ For this method to work the following are needed:
+    1) volume_ref['host'] must resolve to something other than loopback
+ 2) ietd must bind only to the address as resolved above
+    If either condition is not met, fall back on Flags.
+ """
+ volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(),
+ volume_id)
+ result = (None, None)
+ try:
+ (r, _e) = utils.execute("sudo iscsiadm -m discovery -t "
+ "sendtargets -p %s" %
+ volume_ref['host'])
+ except exception.ProcessExecutionError, exc:
+ logging.warn(exc)
+ else:
+ targets = r.splitlines()
+ if len(_e) == 0 and len(targets) == 1:
+            for target in targets:
+                if volume_id in target:
+                    (location, _sep, iscsi_name) = target.partition(" ")
+                    iscsi_portal = location.split(",")[0]
+                    result = (iscsi_name, iscsi_portal)
+                    break
+ return result
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index a4c7a3861..fdeb2506c 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -17,16 +17,110 @@
"""
Management class for Storage-related functions (attach, detach, etc).
"""
+import logging
+
+from nova import exception
+from nova.virt.xenapi.vm_utils import VMHelper
+from nova.virt.xenapi.volume_utils import VolumeHelper
+from nova.virt.xenapi.volume_utils import StorageError
class VolumeOps(object):
+ """
+ Management class for Volume-related tasks
+ """
+
def __init__(self, session):
+ self.XenAPI = session.get_imported_xenapi()
self._session = session
+ # Load XenAPI module in the helper classes respectively
+ VolumeHelper.XenAPI = self.XenAPI
+ VMHelper.XenAPI = self.XenAPI
def attach_volume(self, instance_name, device_path, mountpoint):
- # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
- return True
+ """Attach volume storage to VM instance"""
+ # Before we start, check that the VM exists
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.NotFound(_('Instance %s not found')
+ % instance_name)
+ # NOTE: No Resource Pool concept so far
+ logging.debug(_("Attach_volume: %s, %s, %s"),
+ instance_name, device_path, mountpoint)
+        # Create the iSCSI SR, and the PBD through which hosts access SRs.
+ # But first, retrieve target info, like Host, IQN, LUN and SCSIID
+ vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
+ label = 'SR-%s' % vol_rec['volumeId']
+ description = 'Disk-for:%s' % instance_name
+ # Create SR
+ sr_ref = VolumeHelper.create_iscsi_storage(self._session,
+ vol_rec,
+ label,
+ description)
+ # Introduce VDI and attach VBD to VM
+ try:
+ vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref)
+ except StorageError, exc:
+ logging.warn(exc)
+ VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ raise Exception(_('Unable to create VDI on SR %s for instance %s')
+ % (sr_ref,
+ instance_name))
+ else:
+ try:
+ vbd_ref = VMHelper.create_vbd(self._session,
+ vm_ref, vdi_ref,
+ vol_rec['deviceNumber'],
+ False)
+ except self.XenAPI.Failure, exc:
+ logging.warn(exc)
+ VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ raise Exception(_('Unable to use SR %s for instance %s')
+ % (sr_ref,
+ instance_name))
+ else:
+ try:
+ task = self._session.call_xenapi('Async.VBD.plug',
+ vbd_ref)
+ self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ except self.XenAPI.Failure, exc:
+ logging.warn(exc)
+ VolumeHelper.destroy_iscsi_storage(self._session,
+ sr_ref)
+ raise Exception(_('Unable to attach volume to instance %s')
+ % instance_name)
+ logging.info(_('Mountpoint %s attached to instance %s'),
+ mountpoint, instance_name)
def detach_volume(self, instance_name, mountpoint):
- # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
- return True
+ """Detach volume storage to VM instance"""
+ # Before we start, check that the VM exists
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
+ raise exception.NotFound(_('Instance %s not found')
+ % instance_name)
+ # Detach VBD from VM
+ logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ device_number = VolumeHelper.mountpoint_to_number(mountpoint)
+ try:
+ vbd_ref = VMHelper.find_vbd_by_number(self._session,
+ vm_ref, device_number)
+ except StorageError, exc:
+ logging.warn(exc)
+ raise Exception(_('Unable to locate volume %s') % mountpoint)
+ else:
+ try:
+ sr_ref = VolumeHelper.find_sr_from_vbd(self._session,
+ vbd_ref)
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ except StorageError, exc:
+ logging.warn(exc)
+ raise Exception(_('Unable to detach volume %s') % mountpoint)
+ try:
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+ except StorageError, exc:
+ logging.warn(exc)
+ # Forget SR
+ VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
+ logging.info(_('Mountpoint %s detached from instance %s'),
+ mountpoint, instance_name)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 2153810c8..11c66c974 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -44,21 +44,28 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
:xenapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks (Async.VM.start, etc)
(default: 0.5).
-
+:target_host: the iSCSI Target Host IP address, i.e. the IP
+ address for the nova-volume host
+:target_port: the iSCSI Target Port (default: 3260)
+:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
"""
import logging
+import sys
import xmlrpclib
-from twisted.internet import defer
-from twisted.internet import reactor
+from eventlet import event
+from eventlet import tpool
+from nova import context
+from nova import db
from nova import utils
from nova import flags
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
FLAGS = flags.FLAGS
+
flags.DEFINE_string('xenapi_connection_url',
None,
'URL for connection to XenServer/Xen Cloud Platform.'
@@ -76,54 +83,72 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
-
-XenAPI = None
+flags.DEFINE_string('target_host',
+ None,
+ 'iSCSI Target Host')
+flags.DEFINE_string('target_port',
+ '3260',
+                    'iSCSI Target Port (default: 3260)')
+flags.DEFINE_string('iqn_prefix',
+ 'iqn.2010-10.org.openstack',
+ 'IQN Prefix')
def get_connection(_):
"""Note that XenAPI doesn't have a read-only connection mode, so
the read_only parameter is ignored."""
- # This is loaded late so that there's no need to install this
- # library when not using XenAPI.
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
if not url or password is None:
- raise Exception('Must specify xenapi_connection_url, '
- 'xenapi_connection_username (optionally), and '
- 'xenapi_connection_password to use '
- 'connection_type=xenapi')
+ raise Exception(_('Must specify xenapi_connection_url, '
+ 'xenapi_connection_username (optionally), and '
+ 'xenapi_connection_password to use '
+ 'connection_type=xenapi'))
return XenAPIConnection(url, username, password)
class XenAPIConnection(object):
- """ A connection to XenServer or Xen Cloud Platform """
+ """A connection to XenServer or Xen Cloud Platform"""
+
def __init__(self, url, user, pw):
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
+ def init_host(self):
+ #FIXME(armando): implement this
+ #NOTE(armando): would we need a method
+ #to call when shutting down the host?
+ #e.g. to do session logout?
+ pass
+
def list_instances(self):
- """ List VM instances """
+ """List VM instances"""
return self._vmops.list_instances()
def spawn(self, instance):
- """ Create VM instance """
+ """Create VM instance"""
self._vmops.spawn(instance)
def reboot(self, instance):
- """ Reboot VM instance """
+ """Reboot VM instance"""
self._vmops.reboot(instance)
def destroy(self, instance):
- """ Destroy VM instance """
+ """Destroy VM instance"""
self._vmops.destroy(instance)
+ def pause(self, instance, callback):
+ """Pause VM instance"""
+ self._vmops.pause(instance, callback)
+
+ def unpause(self, instance, callback):
+ """Unpause paused VM instance"""
+ self._vmops.unpause(instance, callback)
+
def get_info(self, instance_id):
- """ Return data about VM instance """
+ """Return data about VM instance"""
return self._vmops.get_info(instance_id)
def get_diagnostics(self, instance_id):
@@ -131,103 +156,121 @@ class XenAPIConnection(object):
return self._vmops.get_diagnostics(instance_id)
def get_console_output(self, instance):
- """ Return snapshot of console """
+ """Return snapshot of console"""
return self._vmops.get_console_output(instance)
def attach_volume(self, instance_name, device_path, mountpoint):
- """ Attach volume storage to VM instance """
+ """Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
device_path,
mountpoint)
def detach_volume(self, instance_name, mountpoint):
- """ Detach volume storage to VM instance """
+ """Detach volume storage to VM instance"""
return self._volumeops.detach_volume(instance_name, mountpoint)
class XenAPISession(object):
- """ The session to invoke XenAPI SDK calls """
+ """The session to invoke XenAPI SDK calls"""
+
def __init__(self, url, user, pw):
- self._session = XenAPI.Session(url)
+ self.XenAPI = self.get_imported_xenapi()
+ self._session = self._create_session(url)
self._session.login_with_password(user, pw)
+ def get_imported_xenapi(self):
+ """Stubout point. This can be replaced with a mock xenapi module."""
+ return __import__('XenAPI')
+
def get_xenapi(self):
- """ Return the xenapi object """
+ """Return the xenapi object"""
return self._session.xenapi
def get_xenapi_host(self):
- """ Return the xenapi host """
+ """Return the xenapi host"""
return self._session.xenapi.session.get_this_host(self._session.handle)
- @utils.deferredToThread
def call_xenapi(self, method, *args):
- """Call the specified XenAPI method on a background thread. Returns
- a Deferred for the result."""
+ """Call the specified XenAPI method on a background thread."""
f = self._session.xenapi
for m in method.split('.'):
f = f.__getattr__(m)
- return f(*args)
+ return tpool.execute(f, *args)
- @utils.deferredToThread
def async_call_plugin(self, plugin, fn, args):
- """Call Async.host.call_plugin on a background thread. Returns a
- Deferred with the task reference."""
- return _unwrap_plugin_exceptions(
- self._session.xenapi.Async.host.call_plugin,
- self.get_xenapi_host(), plugin, fn, args)
-
- def wait_for_task(self, task):
- """Return a Deferred that will give the result of the given task.
- The task is polled until it completes."""
- d = defer.Deferred()
- reactor.callLater(0, self._poll_task, task, d)
- return d
-
- @utils.deferredToThread
- def _poll_task(self, task, deferred):
+ """Call Async.host.call_plugin on a background thread."""
+ return tpool.execute(self._unwrap_plugin_exceptions,
+ self._session.xenapi.Async.host.call_plugin,
+ self.get_xenapi_host(), plugin, fn, args)
+
+ def wait_for_task(self, id, task):
+ """Return the result of the given task. The task is polled
+ until it completes."""
+
+ done = event.Event()
+ loop = utils.LoopingCall(self._poll_task, id, task, done)
+ loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+ rv = done.wait()
+ loop.stop()
+ return rv
+
+ def _create_session(self, url):
+ """Stubout point. This can be replaced with a mock session."""
+ return self.XenAPI.Session(url)
+
+ def _poll_task(self, id, task, done):
"""Poll the given XenAPI task, and fire the given Deferred if we
get a result."""
try:
- #logging.debug('Polling task %s...', task)
+ name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
- if status == 'pending':
- reactor.callLater(FLAGS.xenapi_task_poll_interval,
- self._poll_task, task, deferred)
- elif status == 'success':
+ action = dict(
+ id=int(id),
+ action=name,
+ error=None)
+ if status == "pending":
+ return
+ elif status == "success":
result = self._session.xenapi.task.get_result(task)
- logging.info('Task %s status: success. %s', task, result)
- deferred.callback(_parse_xmlrpc_value(result))
+ logging.info(_("Task [%s] %s status: success %s") % (
+ name,
+ task,
+ result))
+ done.send(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
- logging.warn('Task %s status: %s. %s', task, status,
- error_info)
- deferred.errback(XenAPI.Failure(error_info))
- #logging.debug('Polling task %s done.', task)
- except XenAPI.Failure, exc:
+ action["error"] = str(error_info)
+ logging.warn(_("Task [%s] %s status: %s %s") % (
+ name,
+ task,
+ status,
+ error_info))
+ done.send_exception(self.XenAPI.Failure(error_info))
+ db.instance_action_create(context.get_admin_context(), action)
+ except self.XenAPI.Failure, exc:
logging.warn(exc)
- deferred.errback(exc)
-
-
-def _unwrap_plugin_exceptions(func, *args, **kwargs):
- """ Parse exception details """
- try:
- return func(*args, **kwargs)
- except XenAPI.Failure, exc:
- logging.debug("Got exception: %s", exc)
- if (len(exc.details) == 4 and
- exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
- exc.details[2] == 'Failure'):
- params = None
- try:
- params = eval(exc.details[3])
- except:
- raise exc
- raise XenAPI.Failure(params)
- else:
+ done.send_exception(*sys.exc_info())
+
+ def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
+ """Parse exception details"""
+ try:
+ return func(*args, **kwargs)
+ except self.XenAPI.Failure, exc:
+ logging.debug(_("Got exception: %s"), exc)
+ if (len(exc.details) == 4 and
+ exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
+ exc.details[2] == 'Failure'):
+ params = None
+ try:
+ params = eval(exc.details[3])
+ except:
+ raise exc
+ raise self.XenAPI.Failure(params)
+ else:
+ raise
+ except xmlrpclib.ProtocolError, exc:
+ logging.debug(_("Got exception: %s"), exc)
raise
- except xmlrpclib.ProtocolError, exc:
- logging.debug("Got exception: %s", exc)
- raise
def _parse_xmlrpc_value(val):