author    William Wolf <throughnothing@gmail.com>  2011-08-16 11:31:08 -0400
committer William Wolf <throughnothing@gmail.com>  2011-08-16 11:31:08 -0400
commit    d3becc6a0c99fb73637e7fdb89540fd8c4b48be2 (patch)
tree      3fed37860ec9c54f8e10ebb56ee47f8c5b9122be
parent    5278d7e7a28b49ef1d7acafa9283360b0b33a725 (diff)
parent    7e4d35db17a6d129fe9cbfd3d4ffaeb96fd1fc69 (diff)
merge from trunk
-rw-r--r--  nova/api/ec2/cloud.py  2
-rw-r--r--  nova/compute/manager.py  53
-rw-r--r--  nova/exception.py  4
-rw-r--r--  nova/scheduler/abstract_scheduler.py (renamed from nova/scheduler/zone_aware_scheduler.py)  32
-rw-r--r--  nova/scheduler/host_filter.py  51
-rw-r--r--  nova/scheduler/least_cost.py  7
-rw-r--r--  nova/tests/api/openstack/test_extensions.py  17
-rw-r--r--  nova/tests/scheduler/test_abstract_scheduler.py (renamed from nova/tests/scheduler/test_zone_aware_scheduler.py)  40
-rw-r--r--  nova/tests/scheduler/test_least_cost_scheduler.py  4
-rw-r--r--  nova/tests/scheduler/test_scheduler.py  4
-rw-r--r--  nova/tests/test_metadata.py  2
-rw-r--r--  nova/tests/xenapi/stubs.py  4
-rw-r--r--  nova/virt/xenapi/fake.py  1
-rw-r--r--  nova/virt/xenapi/vm_utils.py  75
-rw-r--r--  nova/virt/xenapi/vmops.py  6
-rw-r--r--  smoketests/test_netadmin.py  19
16 files changed, 224 insertions, 97 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 87bba58c3..9aebf92e3 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -305,7 +305,7 @@ class CloudController(object):
'hostname': hostname,
'instance-action': 'none',
'instance-id': ec2_id,
- 'instance-type': instance_ref['instance_type'],
+ 'instance-type': instance_ref['instance_type']['name'],
'local-hostname': hostname,
'local-ipv4': address,
'placement': {'availability-zone': availability_zone},
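
The change above follows from instance_ref['instance_type'] now being a joined flavor record rather than a plain name string (the test_metadata.py fixture later in this diff is updated the same way), so the metadata handler reads its 'name' field. A trivial sketch of the assumed shape:

    instance_ref = {'instance_type': {'name': 'm1.tiny'}}  # shape assumed from test_metadata.py
    assert instance_ref['instance_type']['name'] == 'm1.tiny'
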
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 16b8e14b4..3299268f2 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -323,10 +323,63 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
+ def _check_image_size():
+ """Ensure image is smaller than the maximum size allowed by the
+ instance_type.
+
+ The image stored in Glance is potentially compressed, so we use two
+ checks to ensure that the size isn't exceeded:
+
+        1) This one - checks compressed size; this is a quick check to
+            eliminate any images which are obviously too large
+
+ 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
+ is a slower check since it requires uncompressing the entire
+ image, but is accurate because it reflects the image's
+ actual size.
+ """
+ # NOTE(jk0): image_ref is defined in the DB model, image_href is
+ # used by the image service. This should be refactored to be
+ # consistent.
+ image_href = instance['image_ref']
+ image_service, image_id = nova.image.get_image_service(image_href)
+ image_meta = image_service.show(context, image_id)
+
+ try:
+ size_bytes = image_meta['size']
+ except KeyError:
+ # Size is not a required field in the image service (yet), so
+ # we are unable to rely on it being there even though it's in
+ # glance.
+
+ # TODO(jk0): Should size be required in the image service?
+ return
+
+ instance_type_id = instance['instance_type_id']
+ instance_type = self.db.instance_type_get(context,
+ instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_id=%(image_id)d, image_size_bytes="
+ "%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
+
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
+
+ _check_image_size()
+
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
updates = {}
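
As the docstring above explains, this is the quick half of a two-phase guard: reject obviously oversized images using the (possibly compressed) size reported by the image service, and leave the accurate uncompressed check to nova.virt.xenapi.vm_utils. A minimal standalone sketch of that pre-check, using hypothetical names rather than the actual manager code:

    def compressed_size_ok(image_meta, instance_type):
        # Quick pre-check only: a compressed image that is already larger than
        # the flavor's disk cannot possibly fit; the precise uncompressed check
        # still runs later in the virt layer.
        size_bytes = image_meta.get('size')  # 'size' is not (yet) required by the image service
        if size_bytes is None:
            return True  # cannot decide here; defer to the uncompressed check
        allowed_bytes = instance_type['local_gb'] * 1024 * 1024 * 1024
        return size_bytes <= allowed_bytes

    # A 41 GB compressed image cannot fit a 40 GB flavor, so it is rejected
    # before any download or uncompression work happens.
    assert not compressed_size_ok({'size': 41 * 1024 ** 3}, {'local_gb': 40})
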
diff --git a/nova/exception.py b/nova/exception.py
index 3e2218863..b09d50797 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -730,3 +730,7 @@ class CannotResizeToSameSize(NovaException):
class CannotResizeToSmallerSize(NovaException):
message = _("Resizing to a smaller size is not supported.")
+
+
+class ImageTooLarge(NovaException):
+ message = _("Image is larger than instance type allows")
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/abstract_scheduler.py
index d1924c9f9..eb924732a 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/abstract_scheduler.py
@@ -14,7 +14,7 @@
# under the License.
"""
-The Zone Aware Scheduler is a base class Scheduler for creating instances
+The AbstractScheduler is a base class Scheduler for creating instances
across zones. There are two expansion points to this class for:
1. Assigning Weights to hosts for requested instances
2. Filtering Hosts based on required instance capabilities
@@ -40,7 +40,7 @@ from nova.scheduler import api
from nova.scheduler import driver
FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')
+LOG = logging.getLogger('nova.scheduler.abstract_scheduler')
class InvalidBlob(exception.NovaException):
@@ -48,8 +48,10 @@ class InvalidBlob(exception.NovaException):
"to instance create request.")
-class ZoneAwareScheduler(driver.Scheduler):
- """Base class for creating Zone Aware Schedulers."""
+class AbstractScheduler(driver.Scheduler):
+ """Base class for creating Schedulers that can work across any nova
+ deployment, from simple designs to multiply-nested zones.
+ """
def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
@@ -266,7 +268,7 @@ class ZoneAwareScheduler(driver.Scheduler):
"""
if topic != "compute":
- raise NotImplementedError(_("Zone Aware Scheduler only understands"
+ raise NotImplementedError(_("Scheduler only understands"
" Compute nodes (for now)"))
num_instances = request_spec.get('num_instances', 1)
@@ -328,13 +330,31 @@ class ZoneAwareScheduler(driver.Scheduler):
requested_mem = instance_type['memory_mb'] * 1024 * 1024
return capabilities['host_memory_free'] >= requested_mem
+ def hold_filter_hosts(self, topic, request_spec, hosts=None):
+ """Filter the full host list (from the ZoneManager)"""
+ # NOTE(dabo): The logic used by the current _schedule() method
+ # is incorrect. Since this task is just to refactor the classes,
+ # I'm not fixing the logic now - that will be the next task.
+ # So for now this method is just renamed; afterwards this will
+ # become the filter_hosts() method, and the one below will
+ # be removed.
+ filter_name = request_spec.get('filter', None)
+ # Make sure that the requested filter is legitimate.
+ selected_filter = host_filter.choose_host_filter(filter_name)
+
+ # TODO(sandy): We're only using InstanceType-based specs
+ # currently. Later we'll need to snoop for more detailed
+ # host filter requests.
+ instance_type = request_spec['instance_type']
+ name, query = selected_filter.instance_type_to_filter(instance_type)
+ return selected_filter.filter_hosts(self.zone_manager, query)
+
def filter_hosts(self, topic, request_spec, host_list=None):
"""Return a list of hosts which are acceptable for scheduling.
Return value should be a list of (hostname, capability_dict)s.
Derived classes may override this, but may find the
'<topic>_filter' function more appropriate.
"""
-
def _default_filter(self, hostname, capabilities, request_spec):
"""Default filter function if there's no <topic>_filter"""
# NOTE(sirp): The default logic is the equivalent to
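
The renamed module keeps the two expansion points its docstring describes: filtering hosts on required capabilities and weighting the survivors. A hedged sketch of what a subclass might look like (the class name and capability keys here are illustrative, not part of this patch):

    from nova.scheduler import abstract_scheduler

    class SimpleMemoryScheduler(abstract_scheduler.AbstractScheduler):
        def filter_hosts(self, topic, request_spec, host_list=None):
            # Keep hosts with enough free memory for the requested
            # instance_type, mirroring the default compute filter above.
            requested = request_spec['instance_type']['memory_mb'] * 1024 * 1024
            return [(host, caps) for host, caps in (host_list or [])
                    if caps.get('host_memory_free', 0) >= requested]

        def weigh_hosts(self, topic, request_spec, hosts):
            # Return [{weight, hostname, capabilities}] dicts; the cheapest
            # (lowest weight) hosts are chosen first.
            return [dict(weight=1, hostname=host, capabilities=caps)
                    for host, caps in hosts]
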
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index b7bbbbcb8..45a8f40d8 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -14,7 +14,12 @@
# under the License.
"""
-Host Filter is a mechanism for requesting instance resources.
+The Host Filter classes are a way to ensure that only hosts that are
+appropriate are considered when creating a new instance. Hosts that are
+either incompatible or insufficient to accept a newly-requested instance
+are removed from consideration by the Host Filter classes. Those that pass
+the filter are then passed on for weighting or other ordering processes.
+
Three filters are included: AllHosts, Flavor & JSON. AllHosts just
returns the full, unfiltered list of hosts. Flavor is a hard coded
matching mechanism based on flavor criteria and JSON is an ad-hoc
@@ -28,12 +33,6 @@ noted a need for a more expressive way of specifying instances.
Since we don't want to get into building full DSL this is a simple
form as an example of how this could be done. In reality, most
consumers will use the more rigid filters such as FlavorFilter.
-
-Note: These are "required" capability filters. These capabilities
-used must be present or the host will be excluded. The hosts
-returned are then weighed by the Weighted Scheduler. Weights
-can take the more esoteric factors into consideration (such as
-server affinity and customer separation).
"""
import json
@@ -41,9 +40,7 @@ import json
from nova import exception
from nova import flags
from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
from nova import utils
-from nova.scheduler import zone_aware_scheduler
LOG = logging.getLogger('nova.scheduler.host_filter')
@@ -125,9 +122,8 @@ class InstanceTypeFilter(HostFilter):
spec_disk = instance_type['local_gb']
extra_specs = instance_type['extra_specs']
- if host_ram_mb >= spec_ram and \
- disk_bytes >= spec_disk and \
- self._satisfies_extra_specs(capabilities, instance_type):
+ if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
+ self._satisfies_extra_specs(capabilities, instance_type)):
selected_hosts.append((host, capabilities))
return selected_hosts
@@ -309,7 +305,6 @@ def choose_host_filter(filter_name=None):
function checks the filter name against a predefined set
of acceptable filters.
"""
-
if not filter_name:
filter_name = FLAGS.default_host_filter
for filter_class in FILTERS:
@@ -317,33 +312,3 @@ def choose_host_filter(filter_name=None):
if host_match == filter_name:
return filter_class()
raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
-
-
-class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- """The HostFilterScheduler uses the HostFilter to filter
- hosts for weighing. The particular filter used may be passed in
- as an argument or the default will be used.
-
- request_spec = {'filter': <Filter name>,
- 'instance_type': <InstanceType dict>}
- """
-
- def filter_hosts(self, topic, request_spec, hosts=None):
- """Filter the full host list (from the ZoneManager)"""
-
- filter_name = request_spec.get('filter', None)
- host_filter = choose_host_filter(filter_name)
-
- # TODO(sandy): We're only using InstanceType-based specs
- # currently. Later we'll need to snoop for more detailed
- # host filter requests.
- instance_type = request_spec['instance_type']
- name, query = host_filter.instance_type_to_filter(instance_type)
- return host_filter.filter_hosts(self.zone_manager, query)
-
- def weigh_hosts(self, topic, request_spec, hosts):
- """Derived classes must override this method and return
- a lists of hosts in [{weight, hostname}] format.
- """
- return [dict(weight=1, hostname=hostname, capabilities=caps)
- for hostname, caps in hosts]
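
The HostFilterScheduler removed here is effectively folded into AbstractScheduler.hold_filter_hosts above; the selection pattern stays the same. A short usage sketch (instance_type and zone_manager are assumed to come from the request_spec and the running scheduler):

    from nova.scheduler import host_filter

    selected_filter = host_filter.choose_host_filter()  # falls back to FLAGS.default_host_filter
    name, query = selected_filter.instance_type_to_filter(instance_type)
    hosts = selected_filter.filter_hosts(zone_manager, query)
    # hosts is a list of (hostname, capability_dict) tuples that satisfied the filter.
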
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 329107efe..a58b11289 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -22,11 +22,14 @@ The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
"""
+# TODO(dabo): This class will be removed in the next merge prop; it remains now
+# because much of the code will be refactored into different classes.
+
import collections
from nova import flags
from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
from nova import utils
from nova import exception
@@ -61,7 +64,7 @@ def compute_fill_first_cost_fn(host):
return free_mem
-class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
def __init__(self, *args, **kwargs):
self.cost_fns_cache = {}
super(LeastCostScheduler, self).__init__(*args, **kwargs)
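
The least-cost approach the module docstring describes — score every host with one or more cost functions, combine the scores with weights, pick the cheapest — reduces to a weighted sum. A self-contained sketch of that calculation (names and numbers are illustrative, not the module's actual tables):

    def fill_first_cost(capabilities):
        # Echoes compute_fill_first_cost_fn: a host's free memory is its cost,
        # so fuller hosts score lower and get packed first.
        return capabilities['host_memory_free']

    def weighted_cost(capabilities, weighted_fns):
        return sum(weight * fn(capabilities) for weight, fn in weighted_fns)

    hosts = {'host1': {'host_memory_free': 8 * 1024 ** 3},
             'host2': {'host_memory_free': 2 * 1024 ** 3}}
    weighted_fns = [(1.0, fill_first_cost)]
    best = min(hosts, key=lambda h: weighted_cost(hosts[h], weighted_fns))
    assert best == 'host2'  # least free memory == least cost == fill-first
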
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index d3f750bf9..1db0c8df2 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -84,6 +84,17 @@ class ExtensionControllerTest(test.TestCase):
super(ExtensionControllerTest, self).setUp()
ext_path = os.path.join(os.path.dirname(__file__), "extensions")
self.flags(osapi_extensions_path=ext_path)
+ self.ext_list = [
+ "FlavorExtraSpecs",
+ "Floating_ips",
+ "Fox In Socks",
+ "Hosts",
+ "Keypairs",
+ "Multinic",
+ "SecurityGroups",
+ "Volumes",
+ ]
+ self.ext_list.sort()
def test_list_extensions_json(self):
app = openstack.APIRouterV11()
@@ -96,9 +107,7 @@ class ExtensionControllerTest(test.TestCase):
data = json.loads(response.body)
names = [x['name'] for x in data['extensions']]
names.sort()
- self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips",
- "Fox In Socks", "Hosts", "Keypairs", "Multinic", "SecurityGroups",
- "Volumes"])
+ self.assertEqual(names, self.ext_list)
# Make sure that at least Fox in Sox is correct.
(fox_ext,) = [
@@ -143,7 +152,7 @@ class ExtensionControllerTest(test.TestCase):
# Make sure we have all the extensions.
exts = root.findall('{0}extension'.format(NS))
- self.assertEqual(len(exts), 8)
+ self.assertEqual(len(exts), len(self.ext_list))
# Make sure that at least Fox in Sox is correct.
(fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX']
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py
index 788efca52..f4f5cc233 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_abstract_scheduler.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Tests For Zone Aware Scheduler.
+Tests For Abstract Scheduler.
"""
import json
@@ -25,7 +25,7 @@ from nova import rpc
from nova import test
from nova.compute import api as compute_api
from nova.scheduler import driver
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
from nova.scheduler import zone_manager
@@ -60,7 +60,7 @@ def fake_zone_manager_service_states(num_hosts):
return states
-class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler):
# No need to stub anything at the moment
pass
@@ -161,15 +161,15 @@ def fake_zone_get_all(context):
]
-class ZoneAwareSchedulerTestCase(test.TestCase):
- """Test case for Zone Aware Scheduler."""
+class AbstractSchedulerTestCase(test.TestCase):
+ """Test case for Abstract Scheduler."""
- def test_zone_aware_scheduler(self):
+ def test_abstract_scheduler(self):
"""
Create a nested set of FakeZones, try to build multiple instances
and ensure that a select call returns the appropriate build plan.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -194,7 +194,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
properly adjusted based on the scale/offset in the zone
db entries.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
child_results = fake_call_zone_method(None, None, None, None)
zones = fake_zone_get_all(None)
sched._adjust_child_weights(child_results, zones)
@@ -209,11 +209,11 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
if zone == 'zone3': # Scale x1000
self.assertEqual(scaled.pop(0), w)
- def test_empty_zone_aware_scheduler(self):
+ def test_empty_abstract_scheduler(self):
"""
Ensure empty hosts & child_zones result in NoValidHosts exception.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -231,7 +231,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
If the zone_blob hint was passed in, don't re-schedule.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
request_spec = {
@@ -248,7 +248,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_local(self):
"""Provision a resource locally or remotely."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_locally',
fake_provision_resource_locally)
@@ -260,7 +260,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_remote(self):
"""Provision a resource locally or remotely."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_from_blob',
fake_provision_resource_from_blob)
@@ -272,9 +272,9 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_from_blob_empty(self):
"""Provision a resource locally or remotely given no hints."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
request_spec = {}
- self.assertRaises(zone_aware_scheduler.InvalidBlob,
+ self.assertRaises(abstract_scheduler.InvalidBlob,
sched._provision_resource_from_blob,
None, {}, 1, {}, {})
@@ -283,7 +283,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
Provision a resource locally or remotely when blob hint passed in.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
def fake_create_db_entry_for_new_instance(self, context,
@@ -317,7 +317,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
passed in.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_decrypt_blob',
fake_decrypt_blob_returns_child_info)
was_called = False
@@ -336,7 +336,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
from an immediate child.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
fake_ask_child_zone_to_create_instance)
@@ -350,14 +350,14 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_decrypt_blob(self):
"""Test that the decrypt method works."""
- fixture = FakeZoneAwareScheduler()
+ fixture = FakeAbstractScheduler()
test_data = {"foo": "bar"}
class StubDecryptor(object):
def decryptor(self, key):
return lambda blob: blob
- self.stubs.Set(zone_aware_scheduler, 'crypto',
+ self.stubs.Set(abstract_scheduler, 'crypto',
StubDecryptor())
self.assertEqual(fixture._decrypt_blob(test_data),
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index fbe6b2f77..de7581d0a 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -18,7 +18,7 @@ Tests For Least Cost Scheduler
from nova import test
from nova.scheduler import least_cost
-from nova.tests.scheduler import test_zone_aware_scheduler
+from nova.tests.scheduler import test_abstract_scheduler
MB = 1024 * 1024
@@ -70,7 +70,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
zone_manager = FakeZoneManager()
- states = test_zone_aware_scheduler.fake_zone_manager_service_states(
+ states = test_abstract_scheduler.fake_zone_manager_service_states(
num_hosts=10)
zone_manager.service_states = states
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 33461025f..158df2a27 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -257,7 +257,9 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 1
+ # NOTE(jk0): If an integer is passed as the image_ref, the image
+ # service will use the default image service (in this case, the fake).
+ inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index ad678714e..bfc7a6d44 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -39,7 +39,7 @@ class MetadataTestCase(test.TestCase):
'key_name': None,
'host': 'test',
'launch_index': 1,
- 'instance_type': 'm1.tiny',
+ 'instance_type': {'name': 'm1.tiny'},
'reservation_id': 'r-xxxxxxxx',
'user_data': '',
'image_ref': 7,
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 0d0f84e32..a6a1febd6 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -28,10 +28,10 @@ from nova import utils
def stubout_instance_snapshot(stubs):
@classmethod
- def fake_fetch_image(cls, context, session, instance_id, image, user,
+ def fake_fetch_image(cls, context, session, instance, image, user,
project, type):
from nova.virt.xenapi.fake import create_vdi
- name_label = "instance-%s" % instance_id
+ name_label = "instance-%s" % instance.id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1aa642e4e..7c91aa9b9 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -140,6 +140,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
'location': '',
'xenstore_data': '',
'sm_config': {},
+ 'physical_utilisation': '123',
'VBDs': {}})
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ba5cf4b49..4a1f07bb1 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -31,6 +31,7 @@ import uuid
from xml.dom import minidom
import glance.client
+from nova import db
from nova import exception
from nova import flags
import nova.image
@@ -413,7 +414,7 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
- def fetch_image(cls, context, session, instance_id, image, user_id,
+ def fetch_image(cls, context, session, instance, image, user_id,
project_id, image_type):
"""Fetch image from glance based on image type.
@@ -422,18 +423,19 @@ class VMHelper(HelperBase):
"""
if image_type == ImageType.DISK_VHD:
return cls._fetch_image_glance_vhd(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
else:
return cls._fetch_image_glance_disk(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
@classmethod
- def _fetch_image_glance_vhd(cls, context, session, instance_id, image,
+ def _fetch_image_glance_vhd(cls, context, session, instance, image,
image_type):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
+ instance_id = instance.id
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
sr_ref = safe_find_sr(session)
@@ -467,17 +469,58 @@ class VMHelper(HelperBase):
cls.scan_sr(session, instance_id, sr_ref)
- # Pull out the UUID of the first VDI
- vdi_uuid = vdis[0]['vdi_uuid']
+ # Pull out the UUID of the first VDI (which is the os VDI)
+ os_vdi_uuid = vdis[0]['vdi_uuid']
+
# Set the name-label to ease debugging
- vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid)
primary_name_label = get_name_label_for_image(image)
session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label)
+ cls._check_vdi_size(context, session, instance, os_vdi_uuid)
return vdis
@classmethod
- def _fetch_image_glance_disk(cls, context, session, instance_id, image,
+ def _get_vdi_chain_size(cls, context, session, vdi_uuid):
+ """Compute the total size of a VDI chain, starting with the specified
+ VDI UUID.
+
+ This will walk the VDI chain to the root, add the size of each VDI into
+ the total.
+ """
+ size_bytes = 0
+ for vdi_rec in walk_vdi_chain(session, vdi_uuid):
+ cur_vdi_uuid = vdi_rec['uuid']
+ vdi_size_bytes = int(vdi_rec['physical_utilisation'])
+            LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
+                '%(vdi_size_bytes)d') % locals())
+ size_bytes += vdi_size_bytes
+ return size_bytes
+
+ @classmethod
+ def _check_vdi_size(cls, context, session, instance, vdi_uuid):
+ size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid)
+
+ # FIXME(jk0): this was copied directly from compute.manager.py, let's
+ # refactor this to a common area
+ instance_type_id = instance['instance_type_id']
+ instance_type = db.instance_type_get(context,
+ instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
+ @classmethod
+ def _fetch_image_glance_disk(cls, context, session, instance, image,
image_type):
"""Fetch the image from Glance
@@ -489,6 +532,7 @@ class VMHelper(HelperBase):
Returns: A single filename if image_type is KERNEL_RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
+ instance_id = instance.id
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
@@ -807,6 +851,21 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
+def walk_vdi_chain(session, vdi_uuid):
+ """Yield vdi_recs for each element in a VDI chain"""
+ # TODO(jk0): perhaps make get_vhd_parent use this
+ while True:
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ yield vdi_rec
+
+ parent_uuid = vdi_rec['sm_config'].get('vhd-parent')
+ if parent_uuid:
+ vdi_uuid = parent_uuid
+ else:
+ break
+
+
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 1fefd1291..eb0a846b5 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -137,7 +137,7 @@ class VMOps(object):
def _create_disks(self, context, instance):
disk_image_type = VMHelper.determine_disk_image_type(instance)
vdis = VMHelper.fetch_image(context, self._session,
- instance.id, instance.image_ref,
+ instance, instance.image_ref,
instance.user_id, instance.project_id,
disk_image_type)
return vdis
@@ -182,11 +182,11 @@ class VMOps(object):
try:
if instance.kernel_id:
kernel = VMHelper.fetch_image(context, self._session,
- instance.id, instance.kernel_id, instance.user_id,
+ instance, instance.kernel_id, instance.user_id,
instance.project_id, ImageType.KERNEL)[0]
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(context, self._session,
- instance.id, instance.ramdisk_id, instance.user_id,
+ instance, instance.ramdisk_id, instance.user_id,
instance.project_id, ImageType.RAMDISK)[0]
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py
index 8c8fa35b8..ef73e6f4c 100644
--- a/smoketests/test_netadmin.py
+++ b/smoketests/test_netadmin.py
@@ -107,14 +107,18 @@ class AddressTests(base.UserSmokeTestCase):
class SecurityGroupTests(base.UserSmokeTestCase):
- def __public_instance_is_accessible(self):
- id_url = "latest/meta-data/instance-id"
+ def __get_metadata_item(self, category):
+ id_url = "latest/meta-data/%s" % category
options = "-f -s --max-time 1"
command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
status, output = commands.getstatusoutput(command)
- instance_id = output.strip()
+ value = output.strip()
if status > 0:
return False
+ return value
+
+ def __public_instance_is_accessible(self):
+ instance_id = self.__get_metadata_item('instance-id')
if not instance_id:
return False
if instance_id != self.data['instance'].id:
@@ -166,7 +170,14 @@ class SecurityGroupTests(base.UserSmokeTestCase):
finally:
result = self.conn.disassociate_address(self.data['public_ip'])
- def test_005_can_revoke_security_group_ingress(self):
+ def test_005_validate_metadata(self):
+
+ instance = self.data['instance']
+        self.assertEqual(instance.instance_type,
+ self.__get_metadata_item("instance-type"))
+ #FIXME(dprince): validate more metadata here
+
+ def test_006_can_revoke_security_group_ingress(self):
self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
ip_protocol='tcp',
from_port=80,
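
The refactored helper above shells out to curl and returns the stripped response body on success, or False on failure. A standalone sketch of the same shape (PUBLIC_IP is a placeholder for the address associated in the earlier tests):

    import commands

    def get_metadata_item(public_ip, category):
        # Same shape as __get_metadata_item above: curl the metadata path and
        # return the stripped body, or False if curl reported a failure.
        url = "%s/latest/meta-data/%s" % (public_ip, category)
        status, output = commands.getstatusoutput("curl -f -s --max-time 1 %s" % url)
        if status > 0:
            return False
        return output.strip()

    # e.g. get_metadata_item(PUBLIC_IP, 'instance-type') should now return the
    # flavor name that cloud.py reads from instance_type['name'].
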