author     Salvatore Orlando <salvatore.orlando@eu.citrix.com>  2011-03-07 17:03:27 +0000
committer  Salvatore Orlando <salvatore.orlando@eu.citrix.com>  2011-03-07 17:03:27 +0000
commit     daaa68415a33c975e3e136f9a02af6f9bf2c3fdb (patch)
tree       8a629f2c7403e4b9a743927ac846b2ab5cb76683 /nova/virt
parent     97563d650a08e7f2d1aa1f08237219291d821e39 (diff)
parent     9610c61055ab84038becf4af0b63bab9071a900a (diff)
Merged with current trunk.

Updated _get_vm_opaque_ref:
- OpaqueRef inspection is not performed anymore
- no test-specific code path
- no more ad-hoc lists of known VM references
- merged stubout_db_network_api with stubout_db_instance_api
- unit tests pass
- pep8 errors fixed
Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/libvirt_conn.py       10
-rw-r--r--  nova/virt/xenapi/vm_utils.py   227
-rw-r--r--  nova/virt/xenapi/vmops.py      316
-rw-r--r--  nova/virt/xenapi/volumeops.py    2
-rw-r--r--  nova/virt/xenapi_conn.py        26
5 files changed, 458 insertions(+), 123 deletions(-)
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 806f35a81..4391689c3 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -55,6 +55,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+#from nova import test
from nova import utils
#from nova.api import context
from nova.auth import manager
@@ -360,7 +361,7 @@ class LibvirtConnection(object):
raise exception.APIError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance):
+ def rescue(self, instance, callback=None):
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -390,7 +391,7 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance):
+ def unrescue(self, instance, callback=None):
# NOTE(vish): Because reboot destroys and recreates an instance using
# the normal xml file, we can just call reboot here
self.reboot(instance)
@@ -604,7 +605,7 @@ class LibvirtConnection(object):
user=user,
project=project,
size=size)
- type_data = instance_types.INSTANCE_TYPES[inst['instance_type']]
+ type_data = instance_types.get_instance_type(inst['instance_type'])
if type_data['local_gb']:
self._cache_image(fn=self._create_local,
@@ -650,7 +651,8 @@ class LibvirtConnection(object):
instance['id'])
# FIXME(vish): stick this in db
instance_type = instance['instance_type']
- instance_type = instance_types.INSTANCE_TYPES[instance_type]
+ # instance_type = test.INSTANCE_TYPES[instance_type]
+ instance_type = instance_types.get_instance_type(instance_type)
ip_address = db.instance_get_fixed_address(context.get_admin_context(),
instance['id'])
# Assume that the gateway also acts as the dhcp server.
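
Both libvirt hunks above replace direct INSTANCE_TYPES[...] subscripts with calls to instance_types.get_instance_type(). The helper itself lies outside this diff; as a rough sketch of the shape such a lookup takes (the flavor table and error handling below are illustrative, not the real module's contents):

    # Hypothetical stand-in for nova.compute.instance_types of this era.
    INSTANCE_TYPES = {
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80),
    }

    def get_instance_type(name):
        # Funneling every lookup through one function gives callers a
        # single place for defaulting and error handling, instead of a
        # bare KeyError at each subscript site.
        try:
            return INSTANCE_TYPES[name]
        except KeyError:
            raise ValueError('Unknown instance type: %s' % name)
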
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 510261b15..11cc30fae 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -25,6 +25,7 @@ import re
import tempfile
import time
import urllib
+import uuid
from xml.dom import minidom
from eventlet import event
@@ -67,11 +68,14 @@ class ImageType:
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
+ 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
+ DISK_VHD = 3
class VMHelper(HelperBase):
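
With DISK_VHD added, callers can no longer infer handling from kernel_id alone; the spawn() hunk in vmops.py further down switches on the image type instead. Condensed here as a standalone restatement (not code from the patch):

    def assume_pv_kernel(image_type, inspect_vdi):
        # DISK_RAW images have their VDI inspected for a PV kernel;
        # DISK_VHD images are assumed PV for now -- the patch carries a
        # TODO to make this configurable once Windows/HVM is supported.
        if image_type == ImageType.DISK_RAW:
            return inspect_vdi()
        elif image_type == ImageType.DISK_VHD:
            return True
        return False
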
@@ -86,7 +90,8 @@ class VMHelper(HelperBase):
the pv_kernel flag indicates whether the guest is HVM or PV
"""
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ instance_type = instance_types.\
+ get_instance_type(instance.instance_type)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
@@ -144,7 +149,8 @@ class VMHelper(HelperBase):
@classmethod
def ensure_free_mem(cls, session, instance):
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ instance_type = instance_types.get_instance_type(
+ instance.instance_type)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
@@ -205,19 +211,17 @@ class VMHelper(HelperBase):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- session.wait_for_task(0, task)
+ session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
- def create_vif(cls, session, vm_ref, network_ref, mac_address):
+ def create_vif(cls, session, vm_ref, network_ref, mac_address, dev="0"):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
- vif_rec['device'] = '0'
+ vif_rec['device'] = dev
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
@@ -269,7 +273,7 @@ class VMHelper(HelperBase):
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
- template_vm_ref = session.wait_for_task(instance_id, task)
+ template_vm_ref = session.wait_for_task(task, instance_id)
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
@@ -280,29 +284,35 @@ class VMHelper(HelperBase):
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
- return template_vm_ref, [template_vdi_uuid, parent_uuid]
+ template_vdi_uuids = {'image': parent_uuid,
+ 'snap': template_vdi_uuid}
+ return template_vm_ref, template_vdi_uuids
@classmethod
def upload_image(cls, session, instance_id, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
+ # NOTE(sirp): Currently we only support uploading images as VHD, there
+ # is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port}
+ 'glance_port': FLAGS.glance_port,
+ 'sr_path': get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
- task = session.async_call_plugin('glance', 'put_vdis', kwargs)
- session.wait_for_task(instance_id, task)
+ task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
+ session.wait_for_task(task, instance_id)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project, type):
+ def fetch_image(cls, session, instance_id, image, user, project,
+ image_type):
"""
- type is interpreted as an ImageType instance
+ image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
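
create_snapshot now hands back the two halves of the snapshot's VHD chain by name rather than as an ordered list, and upload_image forwards that same dict (plus the new sr_path) to the rewritten upload_vhd plugin call. Illustratively, with the names used above:

    # 'image' is the parent VHD, 'snap' the leaf VDI the snapshot created.
    template_vdi_uuids = {'image': parent_uuid,
                          'snap': template_vdi_uuid}
    VMHelper.upload_image(session, instance_id, template_vdi_uuids, image_id)
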
@@ -312,35 +322,80 @@ class VMHelper(HelperBase):
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
- access, type)
+ access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret, type)
+ access, user.secret,
+ image_type)
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access, type):
- sr = find_sr(session)
- if sr is None:
- raise exception.NotFound('Cannot find SR to write VDI to')
+ def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ image_type):
+ LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
+ % locals())
+
+ sr_ref = safe_find_sr(session)
+
+ # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
+ # have the `uuid` module. To work around this, we generate the uuids
+ # here (under Python 2.6+) and pass them as arguments
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
- c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ params = {'image_id': image,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port,
+ 'uuid_stack': uuid_stack,
+ 'sr_path': get_sr_path(session)}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'download_vhd', kwargs)
+ vdi_uuid = session.wait_for_task(task, instance_id)
- meta, image_file = c.get_image(image)
+ scan_sr(session, instance_id, sr_ref)
+
+ # Set the name-label to ease debugging
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ name_label = get_name_label_for_image(image)
+ session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
+
+ LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
+ % locals())
+ return vdi_uuid
+
+ @classmethod
+ def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ image_type):
+ """Fetch the image from Glance
+
+ NOTE:
+ Unlike _fetch_image_glance_vhd, this method does not use the Glance
+ plugin; instead, it streams the disks through domU to the VDI
+ directly.
+
+ """
+ # FIXME(sirp): Since the Glance plugin seems to be required for the
+ # VHD disk, it may be worth using the plugin for both VHD and RAW and
+ # DISK restores
+ sr_ref = safe_find_sr(session)
+
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
- if type == ImageType.DISK:
+
+ if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
- vdi_size, False)
+ name_label = get_name_label_for_image(image)
+ vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
with_vdi_attached_here(session, vdi, False,
lambda dev:
- _stream_disk(dev, type,
+ _stream_disk(dev, image_type,
virtual_size, image_file))
- if (type == ImageType.KERNEL_RAMDISK):
+ if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
@@ -350,7 +405,7 @@ class VMHelper(HelperBase):
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(instance_id, task)
+ filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
session.get_xenapi().VDI.destroy(vdi)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
@@ -359,27 +414,97 @@ class VMHelper(HelperBase):
return session.get_xenapi().VDI.get_uuid(vdi)
@classmethod
+ def determine_disk_image_type(cls, instance):
+ """Disk Image Types are used to determine where the kernel will reside
+ within an image. To figure out which type we're dealing with, we use
+ the following rules:
+
+ 1. If we're using Glance, we can use the image_type field to
+ determine the image_type
+
+ 2. If we're not using Glance, then we need to deduce this based on
+ whether a kernel_id is specified.
+ """
+ def log_disk_format(image_type):
+ pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ ImageType.DISK: 'DISK',
+ ImageType.DISK_RAW: 'DISK_RAW',
+ ImageType.DISK_VHD: 'DISK_VHD'}
+ disk_format = pretty_format[image_type]
+ image_id = instance.image_id
+ instance_id = instance.id
+ LOG.debug(_("Detected %(disk_format)s format for image "
+ "%(image_id)s, instance %(instance_id)s") % locals())
+
+ def determine_from_glance():
+ glance_type2nova_type = {'machine': ImageType.DISK,
+ 'raw': ImageType.DISK_RAW,
+ 'vhd': ImageType.DISK_VHD,
+ 'kernel': ImageType.KERNEL_RAMDISK,
+ 'ramdisk': ImageType.KERNEL_RAMDISK}
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta = client.get_image_meta(instance.image_id)
+ type_ = meta['type']
+ try:
+ return glance_type2nova_type[type_]
+ except KeyError:
+ raise exception.NotFound(
+ _("Unrecognized image type '%(type_)s'") % locals())
+
+ def determine_from_instance():
+ if instance.kernel_id:
+ return ImageType.DISK
+ else:
+ return ImageType.DISK_RAW
+
+ # FIXME(sirp): can we unify the ImageService and xenapi_image_service
+ # abstractions?
+ if FLAGS.xenapi_image_service == 'glance':
+ image_type = determine_from_glance()
+ else:
+ image_type = determine_from_instance()
+
+ log_disk_format(image_type)
+ return image_type
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access,
+ image_type):
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(
+ session, instance_id, image, access, image_type)
+ else:
+ return cls._fetch_image_glance_disk(
+ session, instance_id, image, access, image_type)
+
+ @classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, type):
+ secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+ if image_type == ImageType.KERNEL_RAMDISK:
+ fn = 'get_kernel'
+ else:
+ fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if type != ImageType.KERNEL_RAMDISK:
+ if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
- if type == ImageType.DISK_RAW:
+ if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid = session.wait_for_task(instance_id, task)
+ uuid = session.wait_for_task(task, instance_id)
return uuid
@classmethod
def lookup_image(cls, session, instance_id, vdi_ref):
+ """
+ Determine if VDI is using a PV kernel
+ """
if FLAGS.xenapi_image_service == 'glance':
return cls._lookup_image_glance(session, vdi_ref)
else:
@@ -392,7 +517,7 @@ class VMHelper(HelperBase):
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(instance_id, task)
+ pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
@@ -603,7 +728,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
- session.wait_for_task(instance_id, task)
+ session.wait_for_task(task, instance_id)
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
@@ -661,7 +786,18 @@ def get_vdi_for_vm_safely(session, vm_ref):
return vdi_ref, vdi_rec
+def safe_find_sr(session):
+ """Same as find_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ return sr_ref
+
+
def find_sr(session):
+ """Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
srs = session.get_xenapi().SR.get_all()
for sr in srs:
@@ -676,6 +812,18 @@ def find_sr(session):
return None
+def get_sr_path(session):
+ """Return the path to our storage repository
+
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
@@ -790,9 +938,9 @@ def _is_vdi_pv(dev):
return False
-def _stream_disk(dev, type, virtual_size, image_file):
+def _stream_disk(dev, image_type, virtual_size, image_file):
offset = 0
- if type == ImageType.DISK:
+ if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
@@ -821,3 +969,8 @@ def _write_partition(virtual_size, dev):
(dest, primary_first, primary_last))
LOG.debug(_('Writing partition table %s done.'), dest)
+
+
+def get_name_label_for_image(image):
+ # TODO(sirp): This should eventually be the URI for the Glance image
+ return _('Glance image %s') % image
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 450a06315..64969c924 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -51,7 +51,6 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
- self.known_vm_refs = []
VMHelper.XenAPI = self.XenAPI
def list_instances(self):
@@ -65,48 +64,51 @@ class VMOps(object):
def spawn(self, instance):
"""Create VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
+ instance_name = instance.name
+ vm = VMHelper.lookup(self._session, instance_name)
if vm is not None:
raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance.name)
+ ' non-unique name %s') % instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
- name = instance['name']
- LOG.exception(_('instance %(name)s: not enough free memory')
- % locals())
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- return
+ LOG.exception(_('instance %(instance_name)s: not enough free '
+ 'memory') % locals())
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ return
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- #if kernel is not present we must download a raw disk
- if instance.kernel_id:
- disk_image_type = ImageType.DISK
- else:
- disk_image_type = ImageType.DISK_RAW
- # TODO: Coalesce fetch_image, lookup_image and
- # manipulate_root_image so requires a single VDI mount/umount
- # sequence
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+
vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
instance.image_id, user, project, disk_image_type)
+
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- #Have a look at the VDI and see if it has a PV kernel
+
pv_kernel = False
- if not instance.kernel_id:
+ if disk_image_type == ImageType.DISK_RAW:
+ #Have a look at the VDI and see if it has a PV kernel
pv_kernel = VMHelper.lookup_image(self._session, instance.id,
vdi_ref)
+ elif disk_image_type == ImageType.DISK_VHD:
+ # TODO(sirp): Assuming PV for now; this will need to be
+ # configurable as Windows will use HVM.
+ pv_kernel = True
+
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+
ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
+
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
@@ -120,10 +122,9 @@ class VMOps(object):
self.create_vifs(instance, networks)
LOG.debug(_('Starting VM %s...'), vm_ref)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
- instance_name = instance.name
+ self._start(instance, vm_ref)
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
- % locals())
+ % locals())
def _inject_onset_files():
onset_files = instance.onset_files
@@ -147,18 +148,18 @@ class VMOps(object):
def _wait_for_boot():
try:
- state = self.get_info(instance['name'])['state']
+ state = self.get_info(instance_name)['state']
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- LOG.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
_inject_onset_files()
return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
+ instance_name)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
@@ -177,19 +178,43 @@ class VMOps(object):
a vm name or a vm instance, and want a vm instance in return.
"""
vm = None
- if instance_or_vm in self.known_vm_refs:
- return instance_or_vm
- instance_name = instance_or_vm
- #if instance_or_vm is not a string;
- #must be an ID or a vm instance
- if not isinstance(instance_or_vm, str):
+ #assume instance_or_vm is an instance object
+ try:
instance_name = instance_or_vm.name
+ except (AttributeError, KeyError):
+ if isinstance(instance_or_vm, (int, long)):
+ ctx = context.get_admin_context()
+ instance_obj = db.instance_get(ctx, instance_or_vm)
+ instance_name = instance_obj.name
+ else:
+ instance_name = instance_or_vm
+ #try to lookup instance
vm = VMHelper.lookup(self._session, instance_name)
- self.known_vm_refs.append(vm)
- if vm is None:
- raise exception.NotFound(
- _('Instance not present %s') % instance_name)
- return vm
+ #if vm=None might be a VM ref
+ if vm == None:
+ vm_rec = self._session.get_xenapi().VM.get_record(instance_or_vm)
+ if vm_rec != None:
+ #found - it is a vm ref
+ return instance_or_vm
+ else:
+ return vm
+        #if we end up here instance_or_vm is neither an instance id nor a VM ref
+ raise exception.NotFound(
+ _('Instance not present %s') % instance_name)
+
+ def _acquire_bootlock(self, vm):
+ """Prevent an instance from booting"""
+ self._session.call_xenapi(
+ "VM.set_blocked_operations",
+ vm,
+ {"start": ""})
+
+ def _release_bootlock(self, vm):
+ """Allow an instance to boot"""
+ self._session.call_xenapi(
+ "VM.remove_from_blocked_operations",
+ vm,
+ "start")
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance
@@ -234,7 +259,8 @@ class VMOps(object):
VMHelper.upload_image(
self._session, instance.id, template_vdi_uuids, image_id)
finally:
- self._destroy(instance, template_vm_ref, shutdown=False)
+ self._destroy(instance, template_vm_ref, shutdown=False,
+ destroy_kernel_ramdisk=False)
logging.debug(_("Finished snapshot and upload for VM %s"), instance)
@@ -242,7 +268,7 @@ class VMOps(object):
"""Reboot VM instance"""
vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance. This is done via
@@ -282,6 +308,11 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
+ def _start(self, instance, vm):
+ """Start an instance"""
+ task = self._session.call_xenapi("Async.VM.start", vm, False, False)
+ self._session.wait_for_task(task, instance.id)
+
def inject_file(self, instance, b64_path, b64_contents):
"""Write a file to the VM instance. The path to which it is to be
written and the contents of the file need to be supplied; both should
@@ -308,22 +339,33 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _shutdown(self, instance, vm):
- """Shutdown an instance """
+ def _shutdown(self, instance, vm, hard=True):
+ """Shutdown an instance"""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
locals())
return
+ instance_id = instance.id
+ LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
+ % locals())
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
- self._session.wait_for_task(instance.id, task)
+ task = None
+ if hard:
+ task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
+ else:
+ task = self._session.call_xenapi("Async.VM.clean_shutdown", vm)
+
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
def _destroy_vdis(self, instance, vm):
"""Destroys all VDIs associated with a VM """
+ instance_id = instance.id
+ LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
+ % locals())
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
if not vdis:
@@ -332,33 +374,60 @@ class VMOps(object):
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def _destroy_kernel_ramdisk(self, instance, vm):
+ """
+ Three situations can occur:
+
+ 1. We have neither a ramdisk nor a kernel, in which case we are a
+ RAW image and can omit this step
+
+ 2. We have one or the other, in which case, we should flag as an
+ error
+
+ 3. We have both, in which case we safely remove both the kernel
+ and the ramdisk.
+ """
+ instance_id = instance.id
+ if not instance.kernel_id and not instance.ramdisk_id:
+ # 1. No kernel or ramdisk
+ LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+ "skipping kernel and ramdisk deletion") % locals())
+ return
+
+ if not (instance.kernel_id and instance.ramdisk_id):
+ # 2. We only have kernel xor ramdisk
+ raise exception.NotFound(
+ _("Instance %(instance_id)s has a kernel or ramdisk but not "
+ "both" % locals()))
+
+ # 3. We have both kernel and ramdisk
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+ self._session, vm)
+
+ LOG.debug(_("Removing kernel/ramdisk files"))
+
+ args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(task, instance.id)
+
+ LOG.debug(_("kernel/ramdisk files removed"))
+
def _destroy_vm(self, instance, vm):
"""Destroys a VM record """
+ instance_id = instance.id
try:
- kernel = None
- ramdisk = None
- if instance.kernel_id or instance.ramdisk_id:
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
- self._session, vm)
- task1 = self._session.call_xenapi('Async.VM.destroy', vm)
- LOG.debug(_("Removing kernel/ramdisk files"))
- fn = "remove_kernel_ramdisk"
- args = {}
- if kernel:
- args['kernel-file'] = kernel
- if ramdisk:
- args['ramdisk-file'] = ramdisk
- task2 = self._session.async_call_plugin('glance', fn, args)
- self._session.wait_for_task(instance.id, task1)
- self._session.wait_for_task(instance.id, task2)
- LOG.debug(_("kernel/ramdisk files removed"))
+ task = self._session.call_xenapi('Async.VM.destroy', vm)
+ self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
def destroy(self, instance):
"""
Destroy VM instance
@@ -366,32 +435,37 @@ class VMOps(object):
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
+ instance_id = instance.id
+ LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
vm = VMHelper.lookup(self._session, instance.name)
return self._destroy(instance, vm, shutdown=True)
- def _destroy(self, instance, vm, shutdown=True):
+ def _destroy(self, instance, vm, shutdown=True,
+ destroy_kernel_ramdisk=True):
"""
Destroys VM instance by performing:
- 1. A shutdown if requested
- 2. Destroying associated VDIs
- 3. Destroying that actual VM record
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+ 3. Destroying kernel and ramdisk files (if necessary)
+ 4. Destroying that actual VM record
"""
if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
self._shutdown(instance, vm)
self._destroy_vdis(instance, vm)
+ if destroy_kernel_ramdisk:
+ self._destroy_kernel_ramdisk(instance, vm)
self._destroy_vm(instance, vm)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
callback(ret)
@@ -420,6 +494,78 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
self._wait_with_callback(instance.id, task, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance
+ - shutdown the instance VM
+ - set 'bootlock' to prevent the instance from starting in rescue
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue)
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+ if rescue_vm:
+ raise RuntimeError(_(
+ "Instance is already in Rescue Mode: %s" % instance.name))
+
+ vm = self._get_vm_opaque_ref(instance)
+ self._shutdown(instance, vm)
+ self._acquire_bootlock(vm)
+
+ instance._rescue = True
+ self.spawn(instance)
+ rescue_vm = self._get_vm_opaque_ref(instance)
+
+ vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"]
+ vbd_ref = VMHelper.create_vbd(
+ self._session,
+ rescue_vm,
+ vdi_ref,
+ 1,
+ False)
+
+ self._session.call_xenapi("Async.VBD.plug", vbd_ref)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance
+ - unplug the instance VM's disk from the rescue VM
+ - teardown the rescue VM
+ - release the bootlock to allow the instance VM to start
+
+ """
+ rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
+
+ if not rescue_vm:
+ raise exception.NotFound(_(
+ "Instance is not in Rescue Mode: %s" % instance.name))
+
+ original_vm = self._get_vm_opaque_ref(instance)
+ vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)
+
+ instance._rescue = False
+
+ for vbd_ref in vbds:
+ vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
+ if vbd["userdevice"] == "1":
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+
+ task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
+ self._session.wait_for_task(task1, instance.id)
+
+ vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
+ for vdi in vdis:
+ try:
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
+ self._session.wait_for_task(task, instance.id)
+ except self.XenAPI.Failure:
+ continue
+
+ task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
+ self._session.wait_for_task(task2, instance.id)
+
+ self._release_bootlock(original_vm)
+ self._start(instance, original_vm)
+
def get_info(self, instance):
"""Return data about VM instance"""
vm = self._get_vm_opaque_ref(instance)
@@ -464,9 +610,17 @@ class VMOps(object):
network_IPs = [ip for ip in IPs if ip.network_id == network.id]
def ip_dict(ip):
- return {'netmask': network['netmask'],
- 'enabled': '1',
- 'ip': ip.address}
+ return {
+ "ip": ip.address,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict(ip6):
+ return {
+ "ip": ip6.addressV6,
+ "netmask": ip6.netmaskV6,
+ "gateway": ip6.gatewayV6,
+ "enabled": "1"}
mac_id = instance.mac_address.replace(':', '')
location = 'vm-data/networking/%s' % mac_id
@@ -477,8 +631,11 @@ class VMOps(object):
'broadcast': network['broadcast'],
'mac': instance.mac_address,
'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
+ 'ips': [ip_dict(ip) for ip in network_IPs],
+ 'ip6s': [ip6_dict(ip) for ip in network_IPs]}
+
self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})
+
try:
self.write_to_xenstore(vm_opaque_ref, location,
mapping['location'])
@@ -509,8 +666,17 @@ class VMOps(object):
NetworkHelper.find_network_with_bridge(self._session, bridge)
if network_ref:
- VMHelper.create_vif(self._session, vm_opaque_ref,
- network_ref, instance.mac_address)
+ try:
+ device = "1" if instance._rescue else "0"
+ except (AttributeError, KeyError):
+ device = "0"
+
+ VMHelper.create_vif(
+ self._session,
+ vm_opaque_ref,
+ network_ref,
+ instance.mac_address,
+ device)
def reset_network(self, instance):
"""
@@ -580,7 +746,7 @@ class VMOps(object):
args.update(addl_args)
try:
task = self._session.async_call_plugin(plugin, method, args)
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, e:
ret = None
err_trace = e.details[-1]
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index d89a6f995..757ecf5ad 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -83,7 +83,7 @@ class VolumeOps(object):
try:
task = self._session.call_xenapi('Async.VBD.plug',
vbd_ref)
- self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ self._session.wait_for_task(task, vol_rec['deviceNumber'])
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 9f8b6af02..5a7f39a4d 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -113,6 +113,9 @@ flags.DEFINE_string('xenapi_agent_path',
' Used only if connection_type=xenapi.'
' and xenapi_inject_image=True')
+flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
+ 'Base path to the storage repository')
+
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -207,6 +210,14 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ self._vmops.rescue(instance, callback)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ self._vmops.unrescue(instance, callback)
+
def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
@@ -290,7 +301,7 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
- def wait_for_task(self, id, task):
+ def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
until it completes. Not re-entrant."""
done = event.Event()
@@ -317,10 +328,11 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
if status == "pending":
return
elif status == "success":
@@ -334,7 +346,9 @@ class XenAPISession(object):
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
- db.instance_action_create(context.get_admin_context(), action)
+
+ if id:
+ db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
LOG.warn(exc)
done.send_exception(*sys.exc_info())
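
Every wait_for_task call-site change in this diff tracks the signature flip above: the task now comes first and the id is optional. When no id is supplied the task is still polled to completion, but no instance_action row is recorded:

    # With an id: the result is returned and, on completion, an action
    # row is written via db.instance_action_create().
    vdi_uuid = session.wait_for_task(task, instance_id)

    # Without one: poll and return the result only; nothing is logged
    # to the instance_actions table.
    result = session.wait_for_task(task)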