summaryrefslogtreecommitdiffstats
path: root/nova/virt
diff options
context:
space:
mode:
authorAnthony Young <sleepsonthefloor@gmail.com>2011-01-03 08:51:35 -0800
committerAnthony Young <sleepsonthefloor@gmail.com>2011-01-03 08:51:35 -0800
commitfebe1e32d1e0441206f1645748ed216abe3e89e4 (patch)
treefe828d24fd31cb29ad623796b3e7687427a702ec /nova/virt
parent13dfb66624ca082bd5e83969213c657d2d2d1dff (diff)
parent0e88a58cf95bf9298a52d132cd1eb02f29c6bfe1 (diff)
merge in trunk
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/fake.py17
-rw-r--r--nova/virt/libvirt_conn.py13
-rw-r--r--nova/virt/xenapi/fake.py64
-rw-r--r--nova/virt/xenapi/vm_utils.py144
-rw-r--r--nova/virt/xenapi/vmops.py80
-rw-r--r--nova/virt/xenapi_conn.py16
6 files changed, 308 insertions, 26 deletions
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index a700e35c1..925c32e4d 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -112,6 +112,20 @@ class FakeConnection(object):
self.instances[instance.name] = fake_instance
fake_instance._state = power_state.RUNNING
+ def snapshot(self, instance, name):
+ """
+ Snapshots the specified instance.
+
+ The given parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name.
+
+ The second parameter is the name of the snapshot.
+
+ The work will be done asynchronously. This function returns a
+ Deferred that allows the caller to detect when it is complete.
+ """
+ pass
+
def reboot(self, instance):
"""
Reboot the specified instance.
@@ -202,6 +216,9 @@ class FakeConnection(object):
'num_cpu': 2,
'cpu_time': 0}
+ def get_diagnostics(self, instance_name):
+ pass
+
def list_disks(self, instance_name):
"""
Return the IDs of all the virtual disks attached to the specified
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index dc878846d..6471e8c0c 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -73,6 +73,9 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
+flags.DEFINE_string('injected_network_template',
+ utils.abspath('virt/interfaces.template'),
+ 'Template file for injected network')
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
@@ -265,6 +268,13 @@ class LibvirtConnection(object):
virt_dom.detachDevice(xml)
@exception.wrap_exception
+ def snapshot(self, instance, name):
+ """ Create snapshot from a running VM instance """
+ raise NotImplementedError(
+            _("Instance snapshotting is not supported for libvirt "
+ "at this time"))
+
+ @exception.wrap_exception
def reboot(self, instance):
self.destroy(instance, False)
xml = self.to_xml(instance)
@@ -624,6 +634,9 @@ class LibvirtConnection(object):
'num_cpu': num_cpu,
'cpu_time': cpu_time}
+ def get_diagnostics(self, instance_name):
+ raise exception.APIError("diagnostics are not supported for libvirt")
+
def get_disks(self, instance_name):
"""
Note that this function takes an instance name, not an Instance, so
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1eaf31c25..aa4026f97 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -55,6 +55,8 @@ import datetime
import logging
import uuid
+from pprint import pformat
+
from nova import exception
@@ -64,6 +66,10 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
_db_content = {}
+def log_db_contents(msg=None):
+ logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+
+
def reset():
for c in _CLASSES:
_db_content[c] = {}
@@ -93,6 +99,24 @@ def create_vm(name_label, status,
})
+def destroy_vm(vm_ref):
+ vm_rec = _db_content['VM'][vm_ref]
+
+ vbd_refs = vm_rec['VBDs']
+ for vbd_ref in vbd_refs:
+ destroy_vbd(vbd_ref)
+
+ del _db_content['VM'][vm_ref]
+
+
+def destroy_vbd(vbd_ref):
+ del _db_content['VBD'][vbd_ref]
+
+
+def destroy_vdi(vdi_ref):
+ del _db_content['VDI'][vdi_ref]
+
+
def create_vdi(name_label, read_only, sr_ref, sharable):
return _create_object('VDI', {
'name_label': name_label,
@@ -109,6 +133,23 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
})
+def create_vbd(vm_ref, vdi_ref):
+ vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+ vbd_ref = _create_object('VBD', vbd_rec)
+ after_VBD_create(vbd_ref, vbd_rec)
+ return vbd_ref
+
+
+def after_VBD_create(vbd_ref, vbd_rec):
+ """Create backref from VM to VBD when VBD is created"""
+ vm_ref = vbd_rec['VM']
+ vm_rec = _db_content['VM'][vm_ref]
+ vm_rec['VBDs'] = [vbd_ref]
+
+ vm_name_label = _db_content['VM'][vm_ref]['name_label']
+ vbd_rec['vm_name_label'] = vm_name_label
+
+
def create_pbd(config, sr_ref, attached):
return _create_object('PBD', {
'device-config': config,
@@ -277,11 +318,12 @@ class SessionBase(object):
self._check_arg_count(params, 2)
return get_record(cls, params[1])
- if (func == 'get_by_name_label' or
- func == 'get_by_uuid'):
+ if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
+ return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
- _db_content[cls], func[len('get_by_'):], params[1])
+ _db_content[cls], func[len('get_by_'):], params[1],
+ return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
@@ -324,6 +366,13 @@ class SessionBase(object):
(cls, _) = name.split('.')
ref = is_sr_create and \
_create_sr(cls, params) or _create_object(cls, params[1])
+
+ # Call hook to provide any fixups needed (ex. creating backrefs)
+ try:
+ globals()["after_%s_create" % cls](ref, params[1])
+ except KeyError:
+ pass
+
obj = get_record(cls, ref)
# Add RO fields
@@ -359,11 +408,18 @@ class SessionBase(object):
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
- def _get_by_field(self, recs, k, v):
+ def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
+
+ if return_singleton:
+ try:
+ return result[0]
+ except IndexError:
+ return None
+
return result
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 47fb6db53..9d1b51848 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -20,11 +20,14 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import logging
+import pickle
import urllib
from xml.dom import minidom
+from eventlet import event
from nova import exception
from nova import flags
+from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
@@ -204,7 +207,54 @@ class VMHelper(HelperBase):
return vif_ref
@classmethod
- def fetch_image(cls, session, image, user, project, type):
+ def create_snapshot(cls, session, instance_id, vm_ref, label):
+ """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD
+ """
+ #TODO(sirp): Add quiesce and VSS locking support when Windows support
+ # is added
+ logging.debug(_("Snapshotting VM %s with label '%s'..."),
+ vm_ref, label)
+
+ vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ vm_vdi_uuid = vm_vdi_rec["uuid"]
+ sr_ref = vm_vdi_rec["SR"]
+
+ original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
+
+ task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
+ template_vm_ref = session.wait_for_task(instance_id, task)
+ template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
+ template_vdi_uuid = template_vdi_rec["uuid"]
+
+ logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
+ vm_ref)
+
+ parent_uuid = wait_for_vhd_coalesce(
+ session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
+
+ #TODO(sirp): we need to assert only one parent, not parents two deep
+ return template_vm_ref, [template_vdi_uuid, parent_uuid]
+
+ @classmethod
+ def upload_image(cls, session, instance_id, vdi_uuids, image_name):
+ """ Requests that the Glance plugin bundle the specified VDIs and
+ push them into Glance using the specified human-friendly name.
+ """
+ logging.debug(_("Asking xapi to upload %s as '%s'"),
+ vdi_uuids, image_name)
+
+ params = {'vdi_uuids': vdi_uuids,
+ 'image_name': image_name,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'put_vdis', kwargs)
+ session.wait_for_task(instance_id, task)
+
+ @classmethod
+ def fetch_image(cls, session, instance_id, image, user, project, type):
"""
type is interpreted as an ImageType instance
"""
@@ -223,9 +273,7 @@ class VMHelper(HelperBase):
if type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- uuid = session.wait_for_task(0, task)
+ uuid = session.wait_for_task(instance_id, task)
return uuid
@classmethod
@@ -299,6 +347,10 @@ class VMHelper(HelperBase):
try:
host = session.get_xenapi_host()
host_ip = session.get_xenapi().host.get_record(host)["address"]
+ except (cls.XenAPI.Failure, KeyError) as e:
+ return {"Unable to retrieve diagnostics": e}
+
+ try:
diags = {}
xml = get_rrd(host_ip, record["uuid"])
if xml:
@@ -325,3 +377,87 @@ def get_rrd(host, uuid):
return xml.read()
except IOError:
return None
+
+
+#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to
+# use that implementation
+def get_vhd_parent(session, vdi_rec):
+ """
+ Returns the VHD parent of the given VDI record, as a (ref, rec) pair.
+ Returns None if we're at the root of the tree.
+ """
+ if 'vhd-parent' in vdi_rec['sm_config']:
+ parent_uuid = vdi_rec['sm_config']['vhd-parent']
+ #NOTE(sirp): changed xenapi -> get_xenapi()
+ parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
+ parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
+ #NOTE(sirp): changed log -> logging
+ logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ return parent_ref, parent_rec
+ else:
+ return None
+
+
+def get_vhd_parent_uuid(session, vdi_ref):
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ ret = get_vhd_parent(session, vdi_rec)
+ if ret:
+ parent_ref, parent_rec = ret
+ return parent_rec["uuid"]
+ else:
+ return None
+
+
+def scan_sr(session, instance_id, sr_ref):
+ logging.debug(_("Re-scanning SR %s"), sr_ref)
+ task = session.call_xenapi('Async.SR.scan', sr_ref)
+ session.wait_for_task(instance_id, task)
+
+
+def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
+ original_parent_uuid):
+ """ Spin until the parent VHD is coalesced into its parent VHD
+
+ Before coalesce:
+ * original_parent_vhd
+ * parent_vhd
+ snapshot
+
+    After coalesce:
+ * parent_vhd
+ snapshot
+ """
+ #TODO(sirp): we need to timeout this req after a while
+
+ def _poll_vhds():
+ scan_sr(session, instance_id, sr_ref)
+ parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
+ if original_parent_uuid and (parent_uuid != original_parent_uuid):
+ logging.debug(
+ _("Parent %s doesn't match original parent %s, "
+ "waiting for coalesce..."),
+ parent_uuid, original_parent_uuid)
+ else:
+ done.send(parent_uuid)
+
+ done = event.Event()
+ loop = utils.LoopingCall(_poll_vhds)
+ loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
+ parent_uuid = done.wait()
+ loop.stop()
+ return parent_uuid
+
+
+def get_vdi_for_vm_safely(session, vm_ref):
+ vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+ if vdi_refs is None:
+ raise Exception(_("No VDIs found for VM %s") % vm_ref)
+ else:
+ num_vdis = len(vdi_refs)
+ if num_vdis != 1:
+ raise Exception(_("Unexpected number of VDIs (%s) found for "
+ "VM %s") % (num_vdis, vm_ref))
+
+ vdi_ref = vdi_refs[0]
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, vdi_rec
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index ba502ffa2..76f31635a 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -70,7 +70,7 @@ class VMOps(object):
disk_image_type = ImageType.DISK
else:
disk_image_type = ImageType.DISK_RAW
- vdi_uuid = VMHelper.fetch_image(self._session,
+ vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
instance.image_id, user, project, disk_image_type)
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
#Have a look at the VDI and see if it has a PV kernel
@@ -79,11 +79,11 @@ class VMOps(object):
pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
kernel = None
if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session,
+ kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
ramdisk = None
if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session,
+ ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
@@ -120,6 +120,52 @@ class VMOps(object):
timer.f = _wait_for_boot
return timer.start(interval=0.5, now=True)
+ def snapshot(self, instance, name):
+ """ Create snapshot from a running VM instance
+
+ :param instance: instance to be snapshotted
+ :param name: name/label to be given to the snapshot
+
+ Steps involved in a XenServer snapshot:
+
+ 1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
+ creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD
+
+ 2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
+ a 'base-copy' VDI. The base_copy is immutable and may be chained
+ with other base_copies. If chained, the base_copies
+ coalesce together, so, we must wait for this coalescing to occur to
+ get a stable representation of the data on disk.
+
+ 3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
+ that will bundle the VHDs together and then push the bundle into
+ Glance.
+ """
+
+ #TODO(sirp): Add quiesce and VSS locking support when Windows support
+ # is added
+
+ logging.debug(_("Starting snapshot for VM %s"), instance)
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ label = "%s-snapshot" % instance.name
+ try:
+ template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
+ self._session, instance.id, vm_ref, label)
+ except self.XenAPI.Failure, exc:
+ logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
+ return
+
+ try:
+ # call plugin to ship snapshot off to glance
+ VMHelper.upload_image(
+ self._session, instance.id, template_vdi_uuids, name)
+ finally:
+ self._destroy(instance, template_vm_ref, shutdown=False)
+
+ logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+
def reboot(self, instance):
"""Reboot VM instance"""
instance_name = instance.name
@@ -133,31 +179,36 @@ class VMOps(object):
def destroy(self, instance):
"""Destroy VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm, shutdown=True)
+
+ def _destroy(self, instance, vm, shutdown=True):
+ """ Destroy VM instance """
if vm is None:
# Don't complain, just return. This lets us clean up instances
# that have already disappeared from the underlying platform.
return
# Get the VDIs related to the VM
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
- try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown',
- vm)
- self._session.wait_for_task(instance.id, task)
- except XenAPI.Failure, exc:
- logging.warn(exc)
+ if shutdown:
+ try:
+ task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ self._session.wait_for_task(instance.id, task)
+ except self.XenAPI.Failure, exc:
+ logging.warn(exc)
+
# Disk clean-up
if vdis:
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
- except XenAPI.Failure, exc:
+ except self.XenAPI.Failure, exc:
logging.warn(exc)
# VM Destroy
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
- except XenAPI.Failure, exc:
+ except self.XenAPI.Failure, exc:
logging.warn(exc)
def _wait_with_callback(self, instance_id, task, callback):
@@ -217,11 +268,12 @@ class VMOps(object):
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_info(rec)
- def get_diagnostics(self, instance_id):
+ def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
- vm = VMHelper.lookup(self._session, instance_id)
+ vm = VMHelper.lookup(self._session, instance.name)
if vm is None:
- raise exception.NotFound(_("Instance not found %s") % instance_id)
+ raise exception.NotFound(_("Instance not found %s") %
+ instance.name)
rec = self._session.get_xenapi().VM.get_record(vm)
return VMHelper.compile_diagnostics(self._session, rec)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 7f03d6c2b..f17c8f39d 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -83,6 +83,10 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
+flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
+ 5.0,
+ 'The interval used for polling of coalescing vhds.'
+ ' Used only if connection_type=xenapi.')
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -131,6 +135,10 @@ class XenAPIConnection(object):
"""Create VM instance"""
self._vmops.spawn(instance)
+ def snapshot(self, instance, name):
+ """ Create snapshot from a running VM instance """
+ self._vmops.snapshot(instance, name)
+
def reboot(self, instance):
"""Reboot VM instance"""
self._vmops.reboot(instance)
@@ -159,9 +167,9 @@ class XenAPIConnection(object):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
- def get_diagnostics(self, instance_id):
+ def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
- return self._vmops.get_diagnostics(instance_id)
+ return self._vmops.get_diagnostics(instance)
def get_console_output(self, instance):
"""Return snapshot of console"""
@@ -233,8 +241,8 @@ class XenAPISession(object):
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
action = dict(
- id=int(id),
- action=name,
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
error=None)
if status == "pending":
return