Diffstat:
-rw-r--r--  Authors                                             |   1
-rw-r--r--  nova/flags.py                                       |   6
-rw-r--r--  nova/tests/test_xenapi.py                           |   9
-rw-r--r--  nova/tests/xenapi/stubs.py                          |   2
-rw-r--r--  nova/virt/libvirt/connection.py                     |   3
-rw-r--r--  nova/virt/xenapi/fake.py                            |  31
-rw-r--r--  nova/virt/xenapi/vm_utils.py                        | 177
-rw-r--r--  nova/virt/xenapi/vmops.py                           |   6
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance  |  53
9 files changed, 268 insertions(+), 20 deletions(-)
diff --git a/Authors b/Authors
index afcc2bd70..4df821419 100644
--- a/Authors
+++ b/Authors
@@ -43,6 +43,7 @@ David Subiros <david.perez5@hp.com>
Dean Troyer <dtroyer@gmail.com>
Deepak Garg <deepak.garg@citrix.com>
Derek Higgins <higginsd@gmail.com>
+Devdeep Singh <devdeep.singh@citrix.com>
Devendra Modium <dmodium@isi.edu>
Devin Carlen <devin.carlen@gmail.com>
Donal Lafferty <donal.lafferty@citrix.com>
diff --git a/nova/flags.py b/nova/flags.py
index 6fc3100f8..ea6176e9c 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -445,6 +445,12 @@ global_opts = [
cfg.ListOpt('isolated_hosts',
default=[],
help='Host reserved for specific images'),
+ cfg.BoolOpt('cache_images',
+ default=True,
+ help='Cache glance images locally'),
+ cfg.BoolOpt('use_cow_images',
+ default=True,
+ help='Whether to use cow images')
]
FLAGS.register_opts(global_opts)
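For context, the two options added above drive the clone-versus-copy decision in vm_utils.create_image further down in this change. A minimal, self-contained sketch of that decision (the standalone function and its names are illustrative, not part of the patch):

def choose_image_strategy(cache_images, use_cow_images, sr_type):
    # cache_images: keep a pristine per-image copy in the SR and reuse it.
    # use_cow_images: make instance disks thin clones of that copy, which
    # the patch only allows on an ext (local VHD) SR; otherwise it falls
    # back to a full VDI.copy.
    reuse_cached_copy = cache_images
    thin_clone = use_cow_images and sr_type == "ext"
    return reuse_cached_copy, thin_clone

print(choose_image_strategy(True, True, "ext"))   # (True, True)  -> VDI.clone
print(choose_image_strategy(True, True, "lvm"))   # (True, False) -> VDI.copy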
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index ec10defea..6fc7fdf7d 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -399,7 +399,14 @@ class XenAPIVMTestCase(test.TestCase):
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if not vdi_ref in start_list:
- self.fail('Found unexpected VDI:%s' % vdi_ref)
+ vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+ # If the cache is turned on then the base disk will be
+ # there even after the cleanup
+ if 'other_config' in vdi_rec:
+ if vdi_rec['other_config']['image-id'] is None:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+ else:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 2dfece00f..27e2acd9e 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -46,7 +46,7 @@ def stubout_instance_snapshot(stubs):
def fake_wait_for_vhd_coalesce(*args):
#TODO(sirp): Should we actually fake out the data here
- return "fakeparent"
+ return "fakeparent", "fakebase"
stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
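The stub now returns a pair because _wait_for_vhd_coalesce (see the vm_utils.py hunks below) reports both the VM VDI's parent VHD uuid (the 'image' entry) and that parent's own parent (the new 'base' entry), so the base disk can be kept for the cache. A toy sketch of that two-level lookup, with the sm_config['vhd-parent'] chain modelled as a plain dict (illustrative only):

def parent_chain(vdi_uuid, vhd_parent):
    # vhd_parent maps a VHD uuid to its parent's uuid, the same relationship
    # get_vhd_parent_uuid() reads from sm_config['vhd-parent'].
    parent_uuid = vhd_parent.get(vdi_uuid)
    base_uuid = vhd_parent.get(parent_uuid)
    return parent_uuid, base_uuid

chain = {"vm-vdi-uuid": "image-uuid", "image-uuid": "base-uuid"}
assert parent_chain("vm-vdi-uuid", chain) == ("image-uuid", "base-uuid")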
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 087dba446..17e32e6ac 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -99,9 +99,6 @@ libvirt_opts = [
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
- cfg.BoolOpt('use_cow_images',
- default=True,
- help='Whether to use cow images'),
cfg.StrOpt('cpuinfo_xml_template',
default=utils.abspath('virt/cpuinfo.xml.template'),
help='CpuInfo XML Template (Used only live migration now)'),
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index feb8c00fc..a29929ff9 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -446,6 +446,35 @@ class SessionBase(object):
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
+ def VDI_remove_from_other_config(self, _1, vdi_ref, key):
+ db_ref = _db_content['VDI'][vdi_ref]
+ if not 'other_config' in db_ref:
+ return
+ db_ref['other_config'][key] = None
+
+ def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
+ db_ref = _db_content['VDI'][vdi_ref]
+ if not 'other_config' in db_ref:
+ db_ref['other_config'] = {}
+ db_ref['other_config'][key] = value
+
+ def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
+ db_ref = _db_content['VDI'][vdi_to_copy_ref]
+ name_label = db_ref['name_label']
+ read_only = db_ref['read_only']
+ sharable = db_ref['sharable']
+ vdi_ref = create_vdi(name_label, read_only, sr_ref, sharable)
+ return vdi_ref
+
+ def VDI_clone(self, _1, vdi_to_clone_ref):
+ db_ref = _db_content['VDI'][vdi_to_clone_ref]
+ name_label = db_ref['name_label']
+ read_only = db_ref['read_only']
+ sr_ref = db_ref['SR']
+ sharable = db_ref['sharable']
+ vdi_ref = create_vdi(name_label, read_only, sr_ref, sharable)
+ return vdi_ref
+
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
@@ -457,6 +486,8 @@ class SessionBase(object):
return ''
elif (plugin, method) == ('glance', 'upload_vhd'):
return ''
+ elif (plugin, method) == ('glance', 'create_kernel_ramdisk'):
+ return ''
elif (plugin, method) == ('migration', 'move_vhds_into_sr'):
return ''
elif (plugin, method) == ('migration', 'transfer_vhd'):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 97e40f3ab..cf7967567 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -315,6 +315,22 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
+ def copy_vdi(cls, session, sr_ref, vdi_to_copy_ref):
+ """Copy a VDI and return the new VDIs reference."""
+ vdi_ref = session.call_xenapi('VDI.copy', vdi_to_copy_ref, sr_ref)
+ LOG.debug(_('Copied VDI %(vdi_ref)s from VDI '
+ '%(vdi_to_copy_ref)s on %(sr_ref)s.') % locals())
+ return vdi_ref
+
+ @classmethod
+ def clone_vdi(cls, session, vdi_to_clone_ref):
+ """Clones a VDI and return the new VDIs reference."""
+ vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
+ LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
+ '%(vdi_to_clone_ref)s') % locals())
+ return vdi_ref
+
+ @classmethod
def set_vdi_name_label(cls, session, vdi_uuid, name_label):
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
session.call_xenapi("VDI.set_name_label", vdi_ref, name_label)
@@ -353,11 +369,11 @@ class VMHelper(HelperBase):
LOG.debug(_('Created snapshot %(template_vm_ref)s from'
' VM %(vm_ref)s.') % locals())
- parent_uuid = _wait_for_vhd_coalesce(
+ parent_uuid, base_uuid = _wait_for_vhd_coalesce(
session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)
- #TODO(sirp): we need to assert only one parent, not parents two deep
- template_vdi_uuids = {'image': parent_uuid,
+ template_vdi_uuids = {'base': base_uuid,
+ 'image': parent_uuid,
'snap': template_vdi_uuid}
return template_vm_ref, template_vdi_uuids
@@ -374,6 +390,15 @@ class VMHelper(HelperBase):
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
@classmethod
+ def find_cached_image(cls, session, image_id, sr_ref):
+ """Returns the vdi-ref of the cached image."""
+ for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
+ if ('image-id' in vdi_rec['other_config'] and
+ vdi_rec['other_config']['image-id'] == image_id):
+ return vdi_ref
+ return None
+
+ @classmethod
def upload_image(cls, context, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
@@ -545,6 +570,108 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
+ def create_kernel_image(cls, context, session, instance, image, user_id,
+ project_id, image_type):
+ """Creates kernel/ramdisk file from the image stored in the cache.
+ If the image is not present in the cache, it streams it from glance.
+
+ Returns: A list of dictionaries that describe VDIs
+ """
+ filename = ""
+ if FLAGS.cache_images:
+ args = {}
+ args['cached-image'] = image
+ args['new-image-uuid'] = str(uuid.uuid4())
+ task = session.async_call_plugin('glance', "create_kernel_ramdisk",
+ args)
+ filename = session.wait_for_task(task, instance.id)
+
+ if filename == "":
+ return cls.fetch_image(context, session, instance, image,
+ user_id, project_id, image_type)
+ else:
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=None,
+ file=filename)]
+
+ @classmethod
+ def create_image(cls, context, session, instance, image, user_id,
+ project_id, image_type):
+ """Creates VDI from the image stored in the local cache. If the image
+ is not present in the cache, it streams it from glance.
+
+ Returns: A list of dictionaries that describe VDIs
+ """
+ if not FLAGS.cache_images or image_type == ImageType.DISK_ISO:
+ # If caching is disabled, or the image is an ISO, we do not keep a
+ # local copy. Fetch the image straight from glance.
+ return cls.fetch_image(context, session,
+ instance, instance.image_ref,
+ instance.user_id, instance.project_id,
+ image_type)
+
+ sr_ref = cls.safe_find_sr(session)
+ sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
+ vdi_return_list = []
+
+ if FLAGS.use_cow_images and sr_type != "ext":
+ LOG.warning(_("Fast cloning is only supported on default local SR "
+ "of type ext. SR on this system was found to be of "
+ "type %(sr_type)s. Ignoring the cow flag.")
+ % locals())
+
+ vdi_ref = cls.find_cached_image(session, image, sr_ref)
+ if vdi_ref is None:
+ vdis = cls.fetch_image(context, session, instance, image, user_id,
+ project_id, image_type)
+ vdi_ref = session.call_xenapi('VDI.get_by_uuid',
+ vdis[0]['vdi_uuid'])
+ session.call_xenapi('VDI.add_to_other_config',
+ vdi_ref, "image-id", str(image))
+ session.call_xenapi('VDI.set_name_label',
+ vdi_ref, "Cached glance image")
+
+ for vdi in vdis:
+ if vdi["vdi_type"] == "swap":
+ session.call_xenapi('VDI.add_to_other_config',
+ vdi_ref, "swap-disk",
+ str(vdi['vdi_uuid']))
+
+ if FLAGS.use_cow_images and sr_type == 'ext':
+ new_vdi_ref = cls.clone_vdi(session, vdi_ref)
+ else:
+ new_vdi_ref = cls.copy_vdi(session, sr_ref, vdi_ref)
+
+ # Set the name label for the image we just created and remove image id
+ # field from other-config.
+ session.call_xenapi('VDI.set_name_label', new_vdi_ref, instance.name)
+ session.call_xenapi('VDI.remove_from_other_config',
+ new_vdi_ref, "image-id")
+
+ vdi_return_list.append(dict(
+ vdi_type=("os" if image_type == ImageType.DISK_VHD
+ else ImageType.to_string(image_type)),
+ vdi_uuid=session.call_xenapi('VDI.get_uuid', new_vdi_ref),
+ file=None))
+
+ # Create a swap disk if the glance image had one associated with it.
+ vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
+ if 'swap-disk' in vdi_rec['other_config']:
+ swap_disk_uuid = vdi_rec['other_config']['swap-disk']
+ swap_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
+ swap_disk_uuid)
+ new_swap_vdi_ref = cls.copy_vdi(session, sr_ref, swap_vdi_ref)
+ new_swap_vdi_uuid = session.call_xenapi('VDI.get_uuid',
+ new_swap_vdi_ref)
+ session.call_xenapi('VDI.set_name_label', new_swap_vdi_ref,
+ instance.name + "-swap")
+ vdi_return_list.append(dict(vdi_type="swap",
+ vdi_uuid=new_swap_vdi_uuid,
+ file=None))
+
+ return vdi_return_list
+
+ @classmethod
def fetch_image(cls, context, session, instance, image, user_id,
project_id, image_type):
"""Fetch image from glance based on image type.
@@ -575,7 +702,7 @@ class VMHelper(HelperBase):
# which does not have the `uuid` module. To work around this,
# we generate the uuids here (under Python 2.6+) and
# pass them as arguments
- uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(3)]
glance_host, glance_port = glance.pick_glance_api_server()
params = {'image_id': image,
@@ -712,6 +839,8 @@ class VMHelper(HelperBase):
args['vdi-ref'] = vdi_ref
# Let the plugin copy the correct number of bytes.
args['image-size'] = str(vdi_size)
+ if FLAGS.cache_images:
+ args['cached-image'] = image
task = session.async_call_plugin('glance', fn, args)
filename = session.wait_for_task(task, instance['uuid'])
# Remove the VDI as it is not needed anymore.
@@ -1156,6 +1285,15 @@ def integrate_series(data, col, start, until=None):
return total.quantize(Decimal('1.0000'))
+def _get_all_vdis_in_sr(session, sr_ref):
+ for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
+ try:
+ vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
+ yield vdi_ref, vdi_rec
+ except VMHelper.XenAPI.Failure:
+ continue
+
+
#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to
# use that implementation
def get_vhd_parent(session, vdi_rec):
@@ -1208,10 +1346,35 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
* parent_vhd
snapshot
- Atter coalesce:
+ After coalesce:
* parent_vhd
snapshot
"""
+ def _another_child_vhd():
+ if not original_parent_uuid:
+ return False
+
+ # Search for any other VDI whose parent is the original parent and
+ # which is not in the active VM/instance VDI chain.
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+ parent_vdi_uuid = get_vhd_parent_uuid(session, vdi_ref)
+ for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
+ if ((rec['uuid'] != vdi_uuid) and
+ (rec['uuid'] != parent_vdi_uuid) and
+ (rec['sm_config'].get('vhd-parent') == original_parent_uuid)):
+ # Found another VHD whose parent is also the original parent.
+ return True
+ # Found no other vdi with the same parent.
+ return False
+
+ # Check if the original parent has any other children. If so, coalesce
+ # will not take place.
+ if _another_child_vhd():
+ parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
+ parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
+ base_uuid = get_vhd_parent_uuid(session, parent_ref)
+ return parent_uuid, base_uuid
+
max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
for i in xrange(max_attempts):
VMHelper.scan_sr(session, instance, sr_ref)
@@ -1221,7 +1384,9 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
" %(original_parent_uuid)s, waiting for coalesce...")
% locals())
else:
- return parent_uuid
+ parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
+ base_uuid = get_vhd_parent_uuid(session, parent_ref)
+ return parent_uuid, base_uuid
greenthread.sleep(FLAGS.xenapi_vhd_coalesce_poll_interval)
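To summarise the flow create_image adds above: look the image up in the SR by the image-id key in other_config, stream it from glance only on a miss, then hand the instance either a thin clone (ext SR with use_cow_images) or a full copy. A minimal, dictionary-based sketch of that flow with the XenAPI session stripped out (every name below is illustrative):

def find_cached_image(sr, image_id):
    # Mirrors VMHelper.find_cached_image: match on other_config['image-id'].
    for vdi in sr:
        if vdi.get("other_config", {}).get("image-id") == image_id:
            return vdi
    return None

def create_image(sr, sr_type, image_id, use_cow_images, fetch_from_glance):
    # Populate the cache on a miss, then clone or copy for the instance.
    cached = find_cached_image(sr, image_id)
    if cached is None:
        cached = fetch_from_glance(image_id)
        cached["other_config"] = {"image-id": image_id}
        cached["name_label"] = "Cached glance image"
        sr.append(cached)
    if use_cow_images and sr_type == "ext":
        instance_vdi = {"cow_parent": cached}    # cheap VDI.clone
    else:
        instance_vdi = {"copy_of": cached}       # full VDI.copy
    return instance_vdi

sr = []
fetch = lambda image_id: {"name_label": "glance image %s" % image_id}
create_image(sr, "ext", "abc", True, fetch)   # first boot populates the cache
create_image(sr, "ext", "abc", True, fetch)   # second boot is a cache hit
assert len(sr) == 1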
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index e1b8208b1..b656cabd0 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -186,7 +186,7 @@ class VMOps(object):
def _create_disks(self, context, instance, image_meta):
disk_image_type = VMHelper.determine_disk_image_type(image_meta)
- vdis = VMHelper.fetch_image(context, self._session,
+ vdis = VMHelper.create_image(context, self._session,
instance, instance.image_ref,
instance.user_id, instance.project_id,
disk_image_type)
@@ -279,11 +279,11 @@ class VMOps(object):
ramdisk = None
try:
if instance.kernel_id:
- kernel = VMHelper.fetch_image(context, self._session,
+ kernel = VMHelper.create_kernel_image(context, self._session,
instance, instance.kernel_id, instance.user_id,
instance.project_id, vm_utils.ImageType.KERNEL)[0]
if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(context, self._session,
+ ramdisk = VMHelper.create_kernel_image(context, self._session,
instance, instance.ramdisk_id, instance.user_id,
instance.project_id, vm_utils.ImageType.RAMDISK)[0]
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index e53e4d5eb..35f60923c 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -54,6 +54,7 @@ class RetryException(Exception):
def _copy_kernel_vdi(dest, copy_args):
vdi_uuid = copy_args['vdi_uuid']
vdi_size = copy_args['vdi_size']
+ cached_image = copy_args['cached-image']
logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
dest, vdi_uuid)
filename = KERNEL_DIR + '/' + vdi_uuid
@@ -67,6 +68,17 @@ def _copy_kernel_vdi(dest, copy_args):
#copy only vdi_size bytes
data = f.read(vdi_size)
of.write(data)
+ if cached_image:
+ #create a cache file. If caching is enabled, kernel images do not have
+ #to be fetched from glance.
+ cached_image = KERNEL_DIR + '/' + cached_image
+ logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
+ dest, cached_image)
+ cache_file = open(cached_image, 'wb')
+ cache_file.write(data)
+ cache_file.close()
+ logging.debug("Done. Filename: %s", cached_image)
+
f.close()
of.close()
logging.debug("Done. Filename: %s", filename)
@@ -264,11 +276,17 @@ def _import_vhds(sr_path, staging_path, uuid_stack):
vdi_return_list = []
paths_to_move = []
- image_info = prepare_if_exists(staging_path, 'image.vhd')
+ image_parent = None
+ base_info = prepare_if_exists(staging_path, 'base.vhd')
+ if base_info:
+ paths_to_move.append(base_info[0])
+ image_parent = base_info[0]
+
+ image_info = prepare_if_exists(staging_path, 'image.vhd', image_parent)
if not image_info:
raise Exception("Invalid image: image.vhd not present")
- paths_to_move.append(image_info[0])
+ paths_to_move.insert(0, image_info[0])
snap_info = prepare_if_exists(staging_path, 'snap.vhd',
image_info[0])
@@ -302,9 +320,10 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
('snap' or 'image.vhd')
"""
for name, uuid in vdi_uuids.items():
- source = os.path.join(sr_path, "%s.vhd" % uuid)
- link_name = os.path.join(staging_path, "%s.vhd" % name)
- os.link(source, link_name)
+ if uuid:
+ source = os.path.join(sr_path, "%s.vhd" % uuid)
+ link_name = os.path.join(staging_path, "%s.vhd" % name)
+ os.link(source, link_name)
def _upload_tarball(staging_path, image_id, glance_host, glance_port,
@@ -439,6 +458,24 @@ def _finish_subprocess(proc, cmdline):
return out, err
+def create_kernel_ramdisk(session, args):
+ """Creates a copy of the kernel/ramdisk image if it is present in the
+ cache. If the image is not in the cache, an empty filename is returned.
+ """
+ cached_image = exists(args, 'cached-image')
+ image_uuid = exists(args, 'new-image-uuid')
+ cached_image_filename = KERNEL_DIR + '/' + cached_image
+ filename = KERNEL_DIR + '/' + image_uuid
+
+ if os.path.isfile(cached_image_filename):
+ shutil.copyfile(cached_image_filename, filename)
+ logging.debug("Done. Filename: %s", filename)
+ else:
+ filename = ""
+ logging.debug("Cached kernel/ramdisk image not found")
+ return filename
+
+
def download_vhd(session, args):
"""Download an image from Glance, unbundle it, and then deposit the VHDs
into the storage repository
@@ -491,9 +528,12 @@ def upload_vhd(session, args):
def copy_kernel_vdi(session, args):
vdi = exists(args, 'vdi-ref')
size = exists(args, 'image-size')
+ cached_image = optional(args, 'cached-image')
#Use the uuid as a filename
vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
- copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
+ copy_args = {'vdi_uuid': vdi_uuid,
+ 'vdi_size': int(size),
+ 'cached-image': cached_image}
filename = with_vdi_in_dom0(session, vdi, False,
lambda dev:
_copy_kernel_vdi('/dev/%s' % dev, copy_args))
@@ -515,4 +555,5 @@ if __name__ == '__main__':
XenAPIPlugin.dispatch({'upload_vhd': upload_vhd,
'download_vhd': download_vhd,
'copy_kernel_vdi': copy_kernel_vdi,
+ 'create_kernel_ramdisk': create_kernel_ramdisk,
'remove_kernel_ramdisk': remove_kernel_ramdisk})
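The dom0 side of the kernel/ramdisk cache is small enough to restate without the XenAPI plumbing; the sketch below mirrors create_kernel_ramdisk above (KERNEL_DIR is the same /boot/guest directory the plugin uses, while the standalone function signature is illustrative):

import os
import shutil

KERNEL_DIR = '/boot/guest'

def create_kernel_ramdisk(cached_image, new_image_uuid, kernel_dir=KERNEL_DIR):
    # Return the path of a fresh copy of the cached kernel/ramdisk, or ''
    # so the caller (VMHelper.create_kernel_image) falls back to glance.
    cached_path = os.path.join(kernel_dir, cached_image)
    new_path = os.path.join(kernel_dir, new_image_uuid)
    if not os.path.isfile(cached_path):
        return ''
    shutil.copyfile(cached_path, new_path)
    return new_path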