summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJosh Kearney <josh@jk0.org>2011-08-16 12:47:35 +0000
committerTarmac <>2011-08-16 12:47:35 +0000
commit43bf1b5bd4ee0ab737959e411864dcdab8e1337d (patch)
treea8ae81e18fb069d7e60379154ea0665a176d779e
parentea53d0f37a4f478ffbe18516f99ca26192117e80 (diff)
parentbbd577de616915025e524e330f1991f3f155388c (diff)
Validate the size of VHD files in OVF containers.
-rw-r--r--nova/compute/manager.py53
-rw-r--r--nova/exception.py4
-rw-r--r--nova/tests/scheduler/test_scheduler.py4
-rw-r--r--nova/tests/xenapi/stubs.py4
-rw-r--r--nova/virt/xenapi/fake.py1
-rw-r--r--nova/virt/xenapi/vm_utils.py75
-rw-r--r--nova/virt/xenapi/vmops.py6
7 files changed, 133 insertions, 14 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 16b8e14b4..3299268f2 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -323,10 +323,63 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
+ def _check_image_size():
+ """Ensure image is smaller than the maximum size allowed by the
+ instance_type.
+
+ The image stored in Glance is potentially compressed, so we use two
+ checks to ensure that the size isn't exceeded:
+
+ 1) This one - checks compressed size, this is a quick check to
+ eliminate any images which are obviously too large
+
+ 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
+ is a slower check since it requires uncompressing the entire
+ image, but is accurate because it reflects the image's
+ actual size.
+ """
+ # NOTE(jk0): image_ref is defined in the DB model, image_href is
+ # used by the image service. This should be refactored to be
+ # consistent.
+ image_href = instance['image_ref']
+ image_service, image_id = nova.image.get_image_service(image_href)
+ image_meta = image_service.show(context, image_id)
+
+ try:
+ size_bytes = image_meta['size']
+ except KeyError:
+ # Size is not a required field in the image service (yet), so
+ # we are unable to rely on it being there even though it's in
+ # glance.
+
+ # TODO(jk0): Should size be required in the image service?
+ return
+
+ instance_type_id = instance['instance_type_id']
+ instance_type = self.db.instance_type_get(context,
+ instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_id=%(image_id)d, image_size_bytes="
+ "%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
+
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
+
+ _check_image_size()
+
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
updates = {}
diff --git a/nova/exception.py b/nova/exception.py
index 3e2218863..b09d50797 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -730,3 +730,7 @@ class CannotResizeToSameSize(NovaException):
class CannotResizeToSmallerSize(NovaException):
message = _("Resizing to a smaller size is not supported.")
+
+
+class ImageTooLarge(NovaException):
+ message = _("Image is larger than instance type allows")
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 33461025f..158df2a27 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -257,7 +257,9 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 1
+ # NOTE(jk0): If an integer is passed as the image_ref, the image
+ # service will use the default image service (in this case, the fake).
+ inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 0d0f84e32..a6a1febd6 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -28,10 +28,10 @@ from nova import utils
def stubout_instance_snapshot(stubs):
@classmethod
- def fake_fetch_image(cls, context, session, instance_id, image, user,
+ def fake_fetch_image(cls, context, session, instance, image, user,
project, type):
from nova.virt.xenapi.fake import create_vdi
- name_label = "instance-%s" % instance_id
+ name_label = "instance-%s" % instance.id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1aa642e4e..7c91aa9b9 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -140,6 +140,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
'location': '',
'xenstore_data': '',
'sm_config': {},
+ 'physical_utilisation': '123',
'VBDs': {}})
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ba5cf4b49..4a1f07bb1 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -31,6 +31,7 @@ import uuid
from xml.dom import minidom
import glance.client
+from nova import db
from nova import exception
from nova import flags
import nova.image
@@ -413,7 +414,7 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
- def fetch_image(cls, context, session, instance_id, image, user_id,
+ def fetch_image(cls, context, session, instance, image, user_id,
project_id, image_type):
"""Fetch image from glance based on image type.
@@ -422,18 +423,19 @@ class VMHelper(HelperBase):
"""
if image_type == ImageType.DISK_VHD:
return cls._fetch_image_glance_vhd(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
else:
return cls._fetch_image_glance_disk(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
@classmethod
- def _fetch_image_glance_vhd(cls, context, session, instance_id, image,
+ def _fetch_image_glance_vhd(cls, context, session, instance, image,
image_type):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
+ instance_id = instance.id
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
sr_ref = safe_find_sr(session)
@@ -467,17 +469,58 @@ class VMHelper(HelperBase):
cls.scan_sr(session, instance_id, sr_ref)
- # Pull out the UUID of the first VDI
- vdi_uuid = vdis[0]['vdi_uuid']
+ # Pull out the UUID of the first VDI (which is the os VDI)
+ os_vdi_uuid = vdis[0]['vdi_uuid']
+
# Set the name-label to ease debugging
- vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid)
primary_name_label = get_name_label_for_image(image)
session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label)
+ cls._check_vdi_size(context, session, instance, os_vdi_uuid)
return vdis
@classmethod
- def _fetch_image_glance_disk(cls, context, session, instance_id, image,
+ def _get_vdi_chain_size(cls, context, session, vdi_uuid):
+ """Compute the total size of a VDI chain, starting with the specified
+ VDI UUID.
+
+ This will walk the VDI chain to the root, add the size of each VDI into
+ the total.
+ """
+ size_bytes = 0
+ for vdi_rec in walk_vdi_chain(session, vdi_uuid):
+ cur_vdi_uuid = vdi_rec['uuid']
+ vdi_size_bytes = int(vdi_rec['physical_utilisation'])
+ LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
+ '%(vdi_size_bytes)d' % locals()))
+ size_bytes += vdi_size_bytes
+ return size_bytes
+
+ @classmethod
+ def _check_vdi_size(cls, context, session, instance, vdi_uuid):
+ size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid)
+
+ # FIXME(jk0): this was copied directly from compute.manager.py, let's
+ # refactor this to a common area
+ instance_type_id = instance['instance_type_id']
+ instance_type = db.instance_type_get(context,
+ instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
+ @classmethod
+ def _fetch_image_glance_disk(cls, context, session, instance, image,
image_type):
"""Fetch the image from Glance
@@ -489,6 +532,7 @@ class VMHelper(HelperBase):
Returns: A single filename if image_type is KERNEL_RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
+ instance_id = instance.id
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
@@ -807,6 +851,21 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
+def walk_vdi_chain(session, vdi_uuid):
+ """Yield vdi_recs for each element in a VDI chain"""
+ # TODO(jk0): perhaps make get_vhd_parent use this
+ while True:
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ yield vdi_rec
+
+ parent_uuid = vdi_rec['sm_config'].get('vhd-parent')
+ if parent_uuid:
+ vdi_uuid = parent_uuid
+ else:
+ break
+
+
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 1fefd1291..eb0a846b5 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -137,7 +137,7 @@ class VMOps(object):
def _create_disks(self, context, instance):
disk_image_type = VMHelper.determine_disk_image_type(instance)
vdis = VMHelper.fetch_image(context, self._session,
- instance.id, instance.image_ref,
+ instance, instance.image_ref,
instance.user_id, instance.project_id,
disk_image_type)
return vdis
@@ -182,11 +182,11 @@ class VMOps(object):
try:
if instance.kernel_id:
kernel = VMHelper.fetch_image(context, self._session,
- instance.id, instance.kernel_id, instance.user_id,
+ instance, instance.kernel_id, instance.user_id,
instance.project_id, ImageType.KERNEL)[0]
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(context, self._session,
- instance.id, instance.ramdisk_id, instance.user_id,
+ instance, instance.ramdisk_id, instance.user_id,
instance.project_id, ImageType.RAMDISK)[0]
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',