summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
authorBrian Elliott <brian.elliott@rackspace.com>2012-06-21 04:25:24 +0000
committerBrian Elliott <brian.elliott@rackspace.com>2012-08-20 15:46:53 +0000
commitc7d812a35bf4ef42907366c3f674fd623cd46905 (patch)
treecc0cb8ac745d1fae51dafe7c24f499d5b0b27ea8 /nova/tests
parent740e93aae891d6c20f38b091ad9f54d71db0d7f7 (diff)
Keep the ComputeNode model updated with usage
Keep the compute host's ComputeNode model in sync with the level of resource usage. This enables the ComputeNode model to be used as a basis for scheduling decisions rather than forcing scheduler to calculate free resources from an instance_get_all on each request. Resources like memory and disk are claimed as instances are built or deleted. There is also support for configurable compute node stats (a generic key/value store) for extensible advertising of other usage stats that may be useful for a particular scheduler implementation. Additionally, there is a periodic task on the compute host that audits actual resource consumption at the virt layer to ensure that the database stays in sync. This change partially implements blueprint: scheduler-resource-race This patch complements: https://review.openstack.org/#/c/9540/ (build re-scheduling support) Change-Id: Ibbe3839a054f8b80664b413d47f766ca8d68e3f2
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hosts.py2
-rw-r--r--nova/tests/compute/fake_resource_tracker.py35
-rw-r--r--nova/tests/compute/test_compute.py91
-rw-r--r--nova/tests/compute/test_resource_tracker.py492
-rw-r--r--nova/tests/compute/test_stats.py182
-rw-r--r--nova/tests/scheduler/fakes.py7
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py3
-rw-r--r--nova/tests/scheduler/test_host_filters.py18
-rw-r--r--nova/tests/scheduler/test_host_manager.py4
-rw-r--r--nova/tests/test_db_api.py134
-rw-r--r--nova/tests/test_xenapi.py2
11 files changed, 896 insertions, 74 deletions
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index ea23cd5c2..570b13473 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -214,7 +214,7 @@ class HostTestCase(test.TestCase):
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
- 'cpu_info': ''}
+ 'cpu_info': '', 'stats': {}}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])
diff --git a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/compute/fake_resource_tracker.py
new file mode 100644
index 000000000..faba13bab
--- /dev/null
+++ b/nova/tests/compute/fake_resource_tracker.py
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import resource_tracker
+
+
+class FakeResourceTracker(resource_tracker.ResourceTracker):
+ """Version without a DB requirement"""
+
+ def _create(self, context, values):
+ return values
+
+ def _update(self, context, values, prune_stats=False):
+ self.compute_node.update(values)
+ return self.compute_node
+
+ def _get_service(self, context):
+ return {
+ "id": 1,
+ "compute_node": None
+ }
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index b6d775bd2..dc2d43394 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -53,6 +53,7 @@ import nova.policy
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
+from nova.tests.compute import fake_resource_tracker
from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
from nova.tests.image import fake as fake_image
@@ -92,6 +93,13 @@ class BaseTestCase(test.TestCase):
network_manager='nova.network.manager.FlatManager')
self.compute = importutils.import_object(FLAGS.compute_manager)
+ # override tracker with a version that doesn't need the database:
+ self.compute.resource_tracker = \
+ fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver)
+ self.compute.update_available_resource(
+ context.get_admin_context())
+
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
@@ -140,6 +148,7 @@ class BaseTestCase(test.TestCase):
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
+ inst['os_type'] = 'Linux'
inst.update(params)
return db.instance_create(self.context, inst)
@@ -261,6 +270,87 @@ class ComputeTestCase(BaseTestCase):
finally:
db.instance_destroy(self.context, instance['uuid'])
+ def test_create_instance_insufficient_memory(self):
+ params = {"memory_mb": 999999999999}
+ instance = self._create_fake_instance(params)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance=instance)
+
+ def test_create_instance_insufficient_disk(self):
+ params = {"root_gb": 999999999999,
+ "ephemeral_gb": 99999999999}
+ instance = self._create_fake_instance(params)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance=instance)
+
+ def test_create_multiple_instances_then_starve(self):
+ params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
+ instance = self._create_fake_instance(params)
+ self.compute.run_instance(self.context, instance=instance)
+ self.assertEquals(1024,
+ self.compute.resource_tracker.compute_node['memory_mb_used'])
+ self.assertEquals(256,
+ self.compute.resource_tracker.compute_node['local_gb_used'])
+
+ params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
+ instance = self._create_fake_instance(params)
+ self.compute.run_instance(self.context, instance=instance)
+ self.assertEquals(3072,
+ self.compute.resource_tracker.compute_node['memory_mb_used'])
+ self.assertEquals(768,
+ self.compute.resource_tracker.compute_node['local_gb_used'])
+
+ params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
+ instance = self._create_fake_instance(params)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance=instance)
+
+ def test_create_instance_with_oversubscribed_ram(self):
+ """Test passing of oversubscribed ram policy from the scheduler."""
+
+ # get total memory as reported by virt driver:
+ resources = self.compute.driver.get_available_resource()
+ total_mem_mb = resources['memory_mb']
+
+ oversub_limit_mb = total_mem_mb * 1.5
+ instance_mb = int(total_mem_mb * 1.45)
+
+ # build an instance, specifying an amount of memory that exceeds
+ # total_mem_mb, but is less than the oversubscribed limit:
+ params = {"memory_mb": instance_mb, "root_gb": 128,
+ "ephemeral_gb": 128}
+ instance = self._create_fake_instance(params)
+
+ filter_properties = dict(memory_mb_limit=oversub_limit_mb)
+ self.compute.run_instance(self.context, instance=instance,
+ filter_properties=filter_properties)
+
+ self.assertEqual(instance_mb,
+ self.compute.resource_tracker.compute_node['memory_mb_used'])
+
+ def test_create_instance_with_oversubscribed_ram_fail(self):
+ """Test passing of oversubscribed ram policy from the scheduler, but
+ with insufficient memory.
+ """
+ # get total memory as reported by virt driver:
+ resources = self.compute.driver.get_available_resource()
+ total_mem_mb = resources['memory_mb']
+
+ oversub_limit_mb = total_mem_mb * 1.5
+ instance_mb = int(total_mem_mb * 1.55)
+
+ # build an instance, specifying an amount of memory that exceeds
+ # total_mem_mb, but is less than the oversubscribed limit:
+ params = {"memory_mb": instance_mb, "root_gb": 128,
+ "ephemeral_gb": 128}
+ instance = self._create_fake_instance(params)
+
+ filter_properties = dict(memory_mb_limit=oversub_limit_mb)
+
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance=instance,
+ filter_properties=filter_properties)
+
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1', stub_network=False)
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -3986,6 +4076,7 @@ class ComputeAPITestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'])
security_group_name = self._create_group()['name']
+
self.security_group_api.add_to_instance(self.context,
instance,
security_group_name)
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
new file mode 100644
index 000000000..cba2a6c17
--- /dev/null
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -0,0 +1,492 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for compute resource tracking"""
+
+import copy
+
+from nova.compute import resource_tracker
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova.openstack.common import timeutils
+from nova import test
+from nova.virt import driver
+
+
+class FakeContext(object):
+ def __init__(self, is_admin=False):
+ self.is_admin = is_admin
+
+ def elevated(self):
+ return FakeContext(is_admin=True)
+
+
+class UnsupportedVirtDriver(driver.ComputeDriver):
+ """Pretend version of a lame virt driver"""
+ def get_available_resource(self):
+ # no support for getting resource usage info
+ return {}
+
+
+class FakeVirtDriver(driver.ComputeDriver):
+
+ def __init__(self):
+ self.memory_mb = 5
+ self.local_gb = 6
+ self.vcpus = 1
+
+ self.memory_mb_used = 0
+ self.local_gb_used = 0
+
+ def get_available_resource(self):
+ d = {
+ 'vcpus': self.vcpus,
+ 'memory_mb': self.memory_mb,
+ 'local_gb': self.local_gb,
+ 'vcpus_used': 0,
+ 'memory_mb_used': self.memory_mb_used,
+ 'local_gb_used': self.local_gb_used,
+ 'hypervisor_type': 'fake',
+ 'hypervisor_version': 0,
+ 'hypervisor_hostname': 'fakehost',
+ 'cpu_info': '',
+ }
+ return d
+
+
+class BaseTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+
+ self.context = FakeContext()
+
+ self.instance_ref = {
+ "memory_mb": 1,
+ "root_gb": 1,
+ "ephemeral_gb": 1,
+ "vm_state": vm_states.BUILDING,
+ "task_state": None,
+ "os_type": "Linux",
+ "project_id": "1234",
+ "vcpus": 1,
+ }
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ self._fake_instance_get_all_by_filters)
+
+ def _create_compute_node(self, values=None):
+ compute = {
+ "id": 1,
+ "service_id": 1,
+ "vcpus": 1,
+ "memory_mb": 1,
+ "local_gb": 1,
+ "vcpus_used": 1,
+ "memory_mb_used": 1,
+ "local_gb_used": 1,
+ "free_ram_mb": 1,
+ "free_disk_gb": 1,
+ "current_workload": 1,
+ "running_vms": 0,
+ "cpu_info": None,
+ "stats": [{"key": "num_instances", "value": "1"}]
+ }
+ if values:
+ compute.update(values)
+ return compute
+
+ def _create_service(self, host="fakehost", compute=None):
+ if compute:
+ compute = [compute]
+
+ service = {
+ "id": 1,
+ "host": host,
+ "binary": "nova-compute",
+ "topic": "compute",
+ "compute_node": compute,
+ }
+ return service
+
+ def _fake_instance_get_all_by_filters(self, ctx, filters, **kwargs):
+ return []
+
+ def _tracker(self, unsupported=False):
+ host = "fakehost"
+
+ if unsupported:
+ driver = UnsupportedVirtDriver()
+ else:
+ driver = FakeVirtDriver()
+
+ tracker = resource_tracker.ResourceTracker(host, driver)
+ return tracker
+
+
+class UnsupportedDriverTestCase(BaseTestCase):
+ """Resource tracking should be disabled when the virt driver doesn't
+ support it.
+ """
+ def setUp(self):
+ super(UnsupportedDriverTestCase, self).setUp()
+ self.tracker = self._tracker(unsupported=True)
+ # seed tracker with data:
+ self.tracker.update_available_resource(self.context)
+
+ def testDisabled(self):
+ # disabled = no compute node stats
+ self.assertTrue(self.tracker.disabled)
+ self.assertEqual(None, self.tracker.compute_node)
+
+ def testDisabledClaim(self):
+ # basic claim:
+ claim = self.tracker.begin_resource_claim(self.context, 1, 1)
+ self.assertEqual(None, claim)
+
+ def testDisabledContextClaim(self):
+ # basic context manager variation:
+ with self.tracker.resource_claim(self.context, 1, 1):
+ pass
+ self.assertEqual(0, len(self.tracker.claims))
+
+ def testDisabledInstanceClaim(self):
+ # instance variation:
+ claim = self.tracker.begin_instance_resource_claim(self.context,
+ self.instance_ref)
+ self.assertEqual(None, claim)
+
+ def testDisabledInstanceContextClaim(self):
+ # instance context manager variation:
+ with self.tracker.instance_resource_claim(self.context,
+ self.instance_ref):
+ pass
+ self.assertEqual(0, len(self.tracker.claims))
+
+ def testDisabledFinishClaim(self):
+ self.assertEqual(None, self.tracker.finish_resource_claim(None))
+
+ def testDisabledAbortClaim(self):
+ self.assertEqual(None, self.tracker.abort_resource_claim(self.context,
+ None))
+
+ def testDisabledFreeResources(self):
+ self.tracker.free_resources(self.context)
+ self.assertTrue(self.tracker.disabled)
+ self.assertEqual(None, self.tracker.compute_node)
+
+
+class MissingServiceTestCase(BaseTestCase):
+ def setUp(self):
+ super(MissingServiceTestCase, self).setUp()
+ self.context = FakeContext(is_admin=True)
+ self.tracker = self._tracker()
+
+ def testMissingService(self):
+ """No service record in DB."""
+ self.tracker.update_available_resource(self.context)
+ self.assertTrue(self.tracker.disabled)
+
+
+class MissingComputeNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(MissingComputeNodeTestCase, self).setUp()
+ self.tracker = self._tracker()
+
+ self.stubs.Set(db, 'service_get_all_compute_by_host',
+ self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'compute_node_create',
+ self._fake_create_compute_node)
+
+ def _fake_create_compute_node(self, context, values):
+ self.created = True
+ return self._create_compute_node()
+
+ def _fake_service_get_all_compute_by_host(self, ctx, host):
+ # return a service with no joined compute
+ service = self._create_service()
+ return [service]
+
+ def testCreatedComputeNode(self):
+ self.tracker.update_available_resource(self.context)
+ self.assertTrue(self.created)
+
+ def testEnabled(self):
+ self.tracker.update_available_resource(self.context)
+ self.assertFalse(self.tracker.disabled)
+
+
+class ResourceTestCase(BaseTestCase):
+ def setUp(self):
+ super(ResourceTestCase, self).setUp()
+ self.tracker = self._tracker()
+ self.stubs.Set(db, 'service_get_all_compute_by_host',
+ self._fake_service_get_all_compute_by_host)
+ self.stubs.Set(db, 'compute_node_update',
+ self._fake_compute_node_update)
+
+ self.tracker.update_available_resource(self.context)
+
+ def _fake_service_get_all_compute_by_host(self, ctx, host):
+ self.compute = self._create_compute_node()
+ self.service = self._create_service(host, compute=self.compute)
+ return [self.service]
+
+ def _fake_compute_node_update(self, ctx, compute_node_id, values,
+ prune_stats=False):
+ self.updated = True
+ values['stats'] = [{"key": "num_instances", "value": "1"}]
+ self.compute.update(values)
+ return self.compute
+
+ def testFreeRamResourceValue(self):
+ driver = FakeVirtDriver()
+ mem_free = driver.memory_mb - driver.memory_mb_used
+ self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
+
+ def testFreeDiskResourceValue(self):
+ driver = FakeVirtDriver()
+ mem_free = driver.local_gb - driver.local_gb_used
+ self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
+
+ def testUpdateComputeNode(self):
+ self.assertFalse(self.tracker.disabled)
+ self.assertTrue(self.updated)
+
+ def testInsufficientMemoryClaim(self):
+ """Exceed memory limit of 5MB"""
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=2,
+ disk_gb=0)
+ self.assertNotEqual(None, claim)
+
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=3,
+ disk_gb=0)
+ self.assertNotEqual(None, claim)
+
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=1,
+ disk_gb=0)
+ self.assertEqual(None, claim)
+
+ def testInsufficientMemoryClaimWithOversubscription(self):
+ """Exceed oversubscribed memory limit of 10MB"""
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=10,
+ disk_gb=0, memory_mb_limit=10)
+ self.assertNotEqual(None, claim)
+
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=1,
+ disk_gb=0, memory_mb_limit=10)
+ self.assertEqual(None, claim)
+
+ def testInsufficientDiskClaim(self):
+ """Exceed disk limit of 5GB"""
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=0,
+ disk_gb=2)
+ self.assertNotEqual(None, claim)
+
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=0,
+ disk_gb=3)
+ self.assertNotEqual(None, claim)
+
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=0,
+ disk_gb=5)
+ self.assertEqual(None, claim)
+
+ def testClaimAndFinish(self):
+ self.assertEqual(5, self.tracker.compute_node['memory_mb'])
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+
+ self.assertEqual(6, self.tracker.compute_node['local_gb'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+
+ claim_mem = 3
+ claim_disk = 2
+ claim = self.tracker.begin_resource_claim(self.context, claim_mem,
+ claim_disk)
+
+ self.assertEqual(5, self.compute["memory_mb"])
+ self.assertEqual(claim_mem, self.compute["memory_mb_used"])
+ self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
+
+ self.assertEqual(6, self.compute["local_gb"])
+ self.assertEqual(claim_disk, self.compute["local_gb_used"])
+ self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
+
+ # 1st pretend that the compute operation finished and claimed the
+ # desired resources from the virt layer
+ driver = self.tracker.driver
+ driver.memory_mb_used = claim_mem
+ driver.local_gb_used = claim_disk
+
+ # 2nd update compute node from the virt layer. because the claim is
+ # in-progress (unfinished), the audit will actually mark the resources
+ # as oversubscribed:
+ self.tracker.update_available_resource(self.context)
+
+ self.assertEqual(2 * claim_mem,
+ self.compute['memory_mb_used'])
+ self.assertEqual(5 - (2 * claim_mem),
+ self.compute['free_ram_mb'])
+
+ self.assertEqual(2 * claim_disk,
+ self.compute['local_gb_used'])
+ self.assertEqual(6 - (2 * claim_disk),
+ self.compute['free_disk_gb'])
+
+ # Finally, finish the claim and update from the virt layer again.
+ # Resource usage will be consistent again:
+ self.tracker.finish_resource_claim(claim)
+ self.tracker.update_available_resource(self.context)
+
+ self.assertEqual(claim_mem,
+ self.compute['memory_mb_used'])
+ self.assertEqual(5 - claim_mem,
+ self.compute['free_ram_mb'])
+
+ self.assertEqual(claim_disk,
+ self.compute['local_gb_used'])
+ self.assertEqual(6 - claim_disk,
+ self.compute['free_disk_gb'])
+
+ def testClaimAndAbort(self):
+ self.assertEqual(5, self.tracker.compute_node['memory_mb'])
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+
+ self.assertEqual(6, self.tracker.compute_node['local_gb'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+
+ claim_mem = 3
+ claim_disk = 2
+ claim = self.tracker.begin_resource_claim(self.context, claim_mem,
+ claim_disk)
+
+ self.assertEqual(5, self.compute["memory_mb"])
+ self.assertEqual(claim_mem, self.compute["memory_mb_used"])
+ self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
+
+ self.assertEqual(6, self.compute["local_gb"])
+ self.assertEqual(claim_disk, self.compute["local_gb_used"])
+ self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
+
+ self.tracker.abort_resource_claim(self.context, claim)
+
+ self.assertEqual(5, self.compute["memory_mb"])
+ self.assertEqual(0, self.compute["memory_mb_used"])
+ self.assertEqual(5, self.compute["free_ram_mb"])
+
+ self.assertEqual(6, self.compute["local_gb"])
+ self.assertEqual(0, self.compute["local_gb_used"])
+ self.assertEqual(6, self.compute["free_disk_gb"])
+
+ def testExpiredClaims(self):
+ """Test that old claims get cleaned up automatically if not finished
+ or aborted explicitly.
+ """
+ claim = self.tracker.begin_resource_claim(self.context, memory_mb=2,
+ disk_gb=2)
+ claim.expire_ts = timeutils.utcnow_ts() - 1
+ self.assertTrue(claim.is_expired())
+
+ # and an unexpired claim
+ claim2 = self.tracker.begin_resource_claim(self.context, memory_mb=1,
+ disk_gb=1)
+
+ self.assertEqual(2, len(self.tracker.claims))
+ self.assertEqual(2 + 1, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(2 + 1, self.tracker.compute_node['local_gb_used'])
+
+ # expired claims get expunged when audit runs:
+ self.tracker.update_available_resource(self.context)
+
+ self.assertEqual(1, len(self.tracker.claims))
+ self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(1, self.tracker.compute_node['local_gb_used'])
+
+ # and just call finish & abort to ensure expired claims do not cause
+ # any other explosions:
+ self.tracker.abort_resource_claim(self.context, claim)
+ self.tracker.finish_resource_claim(claim)
+
+ def testInstanceClaim(self):
+ self.tracker.begin_instance_resource_claim(self.context,
+ self.instance_ref)
+ self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
+
+ def testContextClaim(self):
+ with self.tracker.resource_claim(self.context, memory_mb=1, disk_gb=1):
+ # <insert exciting things that utilize resources>
+ self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(1, self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(1, self.compute['memory_mb_used'])
+ self.assertEqual(1, self.compute['local_gb_used'])
+
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(0, self.compute['memory_mb_used'])
+ self.assertEqual(0, self.compute['local_gb_used'])
+
+ def testContextClaimWithException(self):
+ try:
+ with self.tracker.resource_claim(self.context, memory_mb=1,
+ disk_gb=1):
+ # <insert exciting things that utilize resources>
+ raise Exception("THE SKY IS FALLING")
+ except Exception:
+ pass
+
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(0, self.compute['memory_mb_used'])
+ self.assertEqual(0, self.compute['local_gb_used'])
+
+ def testInstanceContextClaim(self):
+ with self.tracker.instance_resource_claim(self.context,
+ self.instance_ref):
+ # <insert exciting things that utilize resources>
+ self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(1, self.compute['memory_mb_used'])
+ self.assertEqual(2, self.compute['local_gb_used'])
+
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(0, self.compute['memory_mb_used'])
+ self.assertEqual(0, self.compute['local_gb_used'])
+
+ def testUpdateLoadStatsForInstance(self):
+ self.assertFalse(self.tracker.disabled)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
+
+ old_ref = self.instance_ref
+ old_ref['task_state'] = task_states.SCHEDULING
+ with self.tracker.instance_resource_claim(self.context, old_ref):
+ pass
+
+ self.assertEqual(1, self.tracker.compute_node['current_workload'])
+
+ new_ref = copy.copy(old_ref)
+ new_ref['vm_state'] = vm_states.ACTIVE
+ new_ref['task_state'] = None
+
+ self.tracker.update_load_stats_for_instance(self.context, old_ref,
+ new_ref)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py
new file mode 100644
index 000000000..0e4ac666d
--- /dev/null
+++ b/nova/tests/compute/test_stats.py
@@ -0,0 +1,182 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for compute node stats"""
+
+from nova.compute import stats
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova import test
+
+
+class StatsTestCase(test.TestCase):
+ def setUp(self):
+ super(StatsTestCase, self).setUp()
+ self.stats = stats.Stats()
+
+ def _create_instance(self, values=None):
+ instance = {
+ "os_type": "Linux",
+ "project_id": "1234",
+ "task_state": None,
+ "vm_state": vm_states.BUILDING,
+ "vcpus": 1,
+ }
+ if values:
+ instance.update(values)
+ return instance
+
+ def testOsTypeCount(self):
+ os_type = "Linux"
+ self.assertEqual(0, self.stats.num_os_type(os_type))
+ self.stats._increment("num_os_type_" + os_type)
+ self.stats._increment("num_os_type_" + os_type)
+ self.stats._increment("num_os_type_Vax")
+ self.assertEqual(2, self.stats.num_os_type(os_type))
+ self.stats["num_os_type_" + os_type] -= 1
+ self.assertEqual(1, self.stats.num_os_type(os_type))
+
+ def testUpdateProjectCount(self):
+ proj_id = "1234"
+
+ def _get():
+ return self.stats.num_instances_for_project(proj_id)
+
+ self.assertEqual(0, _get())
+ self.stats._increment("num_proj_" + proj_id)
+ self.assertEqual(1, _get())
+ self.stats["num_proj_" + proj_id] -= 1
+ self.assertEqual(0, _get())
+
+ def testInstanceCount(self):
+ self.assertEqual(0, self.stats.num_instances)
+ for i in range(5):
+ self.stats._increment("num_instances")
+ self.stats["num_instances"] -= 1
+ self.assertEqual(4, self.stats.num_instances)
+
+ def testAddStatsForInstance(self):
+ instance = {
+ "os_type": "Linux",
+ "project_id": "1234",
+ "task_state": None,
+ "vm_state": vm_states.BUILDING,
+ "vcpus": 3,
+ }
+ self.stats.add_stats_for_instance(instance)
+
+ instance = {
+ "os_type": "FreeBSD",
+ "project_id": "1234",
+ "task_state": task_states.SCHEDULING,
+ "vm_state": None,
+ "vcpus": 1,
+ }
+ self.stats.add_stats_for_instance(instance)
+
+ instance = {
+ "os_type": "Linux",
+ "project_id": "2345",
+ "task_state": task_states.SCHEDULING,
+ "vm_state": vm_states.BUILDING,
+ "vcpus": 2,
+ }
+ self.stats.add_stats_for_instance(instance)
+
+ self.assertEqual(2, self.stats.num_os_type("Linux"))
+ self.assertEqual(1, self.stats.num_os_type("FreeBSD"))
+
+ self.assertEquals(2, self.stats.num_instances_for_project("1234"))
+ self.assertEquals(1, self.stats.num_instances_for_project("2345"))
+
+ self.assertEqual(1, self.stats["num_task_None"])
+ self.assertEqual(2, self.stats["num_task_" + task_states.SCHEDULING])
+
+ self.assertEqual(1, self.stats["num_vm_None"])
+ self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING])
+
+ self.assertEqual(6, self.stats.num_vcpus_used)
+
+ def testCalculateWorkload(self):
+ self.stats._increment("num_task_None")
+ self.stats._increment("num_task_" + task_states.SCHEDULING)
+ self.stats._increment("num_task_" + task_states.SCHEDULING)
+ self.assertEqual(2, self.stats.calculate_workload())
+
+ def testUpdateStatsForInstanceNoChange(self):
+ old = self._create_instance()
+ self.stats.add_stats_for_instance(old)
+
+ self.stats.update_stats_for_instance(old, old) # no change
+ self.assertEqual(1, self.stats.num_instances)
+ self.assertEqual(1, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(1, self.stats["num_os_type_Linux"])
+ self.assertEqual(1, self.stats["num_task_None"])
+ self.assertEqual(1, self.stats["num_vm_" + vm_states.BUILDING])
+
+ def testUpdateStatsForInstanceVmChange(self):
+ old = self._create_instance()
+ self.stats.add_stats_for_instance(old)
+
+ new = self._create_instance({"vm_state": vm_states.PAUSED})
+ self.stats.update_stats_for_instance(old, new)
+ self.assertEqual(1, self.stats.num_instances)
+ self.assertEqual(1, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(1, self.stats.num_os_type("Linux"))
+ self.assertEqual(0, self.stats.num_vm_state(vm_states.BUILDING))
+ self.assertEqual(1, self.stats.num_vm_state(vm_states.PAUSED))
+
+ def testUpdateStatsForInstanceTaskChange(self):
+ old = self._create_instance()
+ self.stats.add_stats_for_instance(old)
+
+ new = self._create_instance({"task_state": task_states.REBUILDING})
+ self.stats.update_stats_for_instance(old, new)
+ self.assertEqual(1, self.stats.num_instances)
+ self.assertEqual(1, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(1, self.stats["num_os_type_Linux"])
+ self.assertEqual(0, self.stats["num_task_None"])
+ self.assertEqual(1, self.stats["num_task_" + task_states.REBUILDING])
+
+ def testUpdateStatsForInstanceDeleted(self):
+ old = self._create_instance()
+ self.stats.add_stats_for_instance(old)
+ self.assertEqual(1, self.stats["num_proj_1234"])
+
+ new = self._create_instance({"vm_state": vm_states.DELETED})
+ self.stats.update_stats_for_instance(old, new)
+
+ self.assertEqual(0, self.stats.num_instances)
+ self.assertEqual(0, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(0, self.stats.num_os_type("Linux"))
+ self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING])
+ self.assertEqual(0, self.stats.num_vcpus_used)
+
+ def testIoWorkload(self):
+ vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED]
+ tasks = [task_states.RESIZE_MIGRATING, task_states.REBUILDING,
+ task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
+ task_states.IMAGE_BACKUP, task_states.RESCUING]
+
+ for state in vms:
+ self.stats._increment("num_vm_" + state)
+ for state in tasks:
+ self.stats._increment("num_task_" + state)
+
+ self.assertEqual(6, self.stats.calculate_io_workload())
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 84424c1c7..e48e987a4 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -27,12 +27,16 @@ from nova.scheduler import host_manager
COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ free_disk_gb=512, free_ram_mb=512, vcpus_used=1,
service=dict(host='host1', disabled=False)),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ free_disk_gb=1024, free_ram_mb=1024, vcpus_used=2,
service=dict(host='host2', disabled=True)),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ free_disk_gb=3072, free_ram_mb=3072, vcpus_used=1,
service=dict(host='host3', disabled=False)),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ free_disk_gb=8192, free_ram_mb=8192, vcpus_used=0,
service=dict(host='host4', disabled=False)),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
@@ -128,8 +132,5 @@ class FakeComputeAPI(object):
def mox_host_manager_db_calls(mock, context):
mock.StubOutWithMock(db, 'compute_node_get_all')
- mock.StubOutWithMock(db, 'instance_get_all')
db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
- db.instance_get_all(mox.IgnoreArg(),
- columns_to_join=['instance_type']).AndReturn(INSTANCES)
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 64bf5415f..1cab6ebbf 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -203,7 +203,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEquals(weight, -1.0)
hostinfo = host_manager.HostState('host', 'compute')
hostinfo.update_from_compute_node(dict(memory_mb=1000,
- local_gb=0, vcpus=1))
+ local_gb=0, vcpus=1, free_disk_gb=1000, free_ram_mb=1000,
+ vcpus_used=0))
self.assertEquals(1000 - 128, fn(hostinfo, {}))
def test_max_attempts(self):
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index d6f083576..a25e9afae 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -339,6 +339,24 @@ class HostFiltersTestCase(test.TestCase):
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_ram_filter_sets_memory_limit(self):
+ """Test that ram filter sets a filter_property denoting the memory
+ ceiling.
+ """
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['RamFilter']()
+ self.flags(ram_allocation_ratio=2.0)
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ capabilities = {'enabled': True}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'compute',
+ {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
+ 'capabilities': capabilities, 'service': service})
+ filt_cls.host_passes(host, filter_properties)
+
+ self.assertEqual(host.total_usable_ram_mb * 2.0,
+ filter_properties['memory_limit_mb'])
+
def test_compute_filter_fails_on_service_disabled(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 45ad1021b..a2f9fc425 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -130,14 +130,10 @@ class HostManagerTestCase(test.TestCase):
self.mox.StubOutWithMock(db, 'compute_node_get_all')
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
- self.mox.StubOutWithMock(db, 'instance_get_all')
db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
# Invalid service
host_manager.LOG.warn("No service for compute ID 5")
- db.instance_get_all(context,
- columns_to_join=['instance_type']).AndReturn(
- fakes.INSTANCES)
self.mox.ReplayAll()
host_states = self.host_manager.get_all_host_states(context, topic)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 93e0edfbc..e2420b48f 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -887,9 +887,15 @@ class CapacityTestCase(test.TestCase):
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
- local_gb_used=0, hypervisor_type="xen",
+ local_gb_used=0, free_ram_mb=1024,
+ free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
+ running_vms=0, current_workload=0,
service_id=self.service.id)
+ # add some random stats
+ stats = dict(num_instances=3, num_proj_12345=2,
+ num_proj_23456=2, num_vm_building=3)
+ self.compute_node_dict['stats'] = stats
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
@@ -898,6 +904,13 @@ class CapacityTestCase(test.TestCase):
self.compute_node_dict['host'] = host
return db.compute_node_create(self.ctxt, self.compute_node_dict)
+ def _stats_as_dict(self, stats):
+ d = {}
+ for s in stats:
+ key = s['key']
+ d[key] = s['value']
+ return d
+
def test_compute_node_create(self):
item = self._create_helper('host1')
self.assertEquals(item.free_ram_mb, 1024)
@@ -905,70 +918,63 @@ class CapacityTestCase(test.TestCase):
self.assertEquals(item.running_vms, 0)
self.assertEquals(item.current_workload, 0)
- def test_compute_node_create_with_reservations(self):
- self.flags(reserved_host_memory_mb=256)
+ stats = self._stats_as_dict(item['stats'])
+ self.assertEqual(3, stats['num_instances'])
+ self.assertEqual(2, stats['num_proj_12345'])
+ self.assertEqual(3, stats['num_vm_building'])
+
+ def test_compute_node_get_all(self):
+ item = self._create_helper('host1')
+ nodes = db.compute_node_get_all(self.ctxt)
+ self.assertEqual(1, len(nodes))
+
+ node = nodes[0]
+ self.assertEqual(2, node['vcpus'])
+
+ stats = self._stats_as_dict(node['stats'])
+ self.assertEqual(3, int(stats['num_instances']))
+ self.assertEqual(2, int(stats['num_proj_12345']))
+ self.assertEqual(3, int(stats['num_vm_building']))
+
+ def test_compute_node_update(self):
+ item = self._create_helper('host1')
+
+ compute_node_id = item['id']
+ stats = self._stats_as_dict(item['stats'])
+
+ # change some values:
+ stats['num_instances'] = 8
+ stats['num_tribbles'] = 1
+ values = {
+ 'vcpus': 4,
+ 'stats': stats,
+ }
+ item = db.compute_node_update(self.ctxt, compute_node_id, values)
+ stats = self._stats_as_dict(item['stats'])
+
+ self.assertEqual(4, item['vcpus'])
+ self.assertEqual(8, int(stats['num_instances']))
+ self.assertEqual(2, int(stats['num_proj_12345']))
+ self.assertEqual(1, int(stats['num_tribbles']))
+
+ def test_compute_node_stat_prune(self):
item = self._create_helper('host1')
- self.assertEquals(item.free_ram_mb, 1024 - 256)
-
- def test_compute_node_set(self):
- self._create_helper('host1')
-
- x = db.compute_node_utilization_set(self.ctxt, 'host1',
- free_ram_mb=2048, free_disk_gb=4096)
- self.assertEquals(x.free_ram_mb, 2048)
- self.assertEquals(x.free_disk_gb, 4096)
- self.assertEquals(x.running_vms, 0)
- self.assertEquals(x.current_workload, 0)
-
- x = db.compute_node_utilization_set(self.ctxt, 'host1', work=3)
- self.assertEquals(x.free_ram_mb, 2048)
- self.assertEquals(x.free_disk_gb, 4096)
- self.assertEquals(x.current_workload, 3)
- self.assertEquals(x.running_vms, 0)
-
- x = db.compute_node_utilization_set(self.ctxt, 'host1', vms=5)
- self.assertEquals(x.free_ram_mb, 2048)
- self.assertEquals(x.free_disk_gb, 4096)
- self.assertEquals(x.current_workload, 3)
- self.assertEquals(x.running_vms, 5)
-
- def test_compute_node_utilization_update(self):
- self._create_helper('host1')
-
- x = db.compute_node_utilization_update(self.ctxt, 'host1',
- free_ram_mb_delta=-24)
- self.assertEquals(x.free_ram_mb, 1000)
- self.assertEquals(x.free_disk_gb, 2048)
- self.assertEquals(x.running_vms, 0)
- self.assertEquals(x.current_workload, 0)
-
- x = db.compute_node_utilization_update(self.ctxt, 'host1',
- free_disk_gb_delta=-48)
- self.assertEquals(x.free_ram_mb, 1000)
- self.assertEquals(x.free_disk_gb, 2000)
- self.assertEquals(x.running_vms, 0)
- self.assertEquals(x.current_workload, 0)
-
- x = db.compute_node_utilization_update(self.ctxt, 'host1',
- work_delta=3)
- self.assertEquals(x.free_ram_mb, 1000)
- self.assertEquals(x.free_disk_gb, 2000)
- self.assertEquals(x.current_workload, 3)
- self.assertEquals(x.running_vms, 0)
-
- x = db.compute_node_utilization_update(self.ctxt, 'host1',
- work_delta=-1)
- self.assertEquals(x.free_ram_mb, 1000)
- self.assertEquals(x.free_disk_gb, 2000)
- self.assertEquals(x.current_workload, 2)
- self.assertEquals(x.running_vms, 0)
-
- x = db.compute_node_utilization_update(self.ctxt, 'host1',
- vm_delta=5)
- self.assertEquals(x.free_ram_mb, 1000)
- self.assertEquals(x.free_disk_gb, 2000)
- self.assertEquals(x.current_workload, 2)
- self.assertEquals(x.running_vms, 5)
+ for stat in item['stats']:
+ if stat['key'] == 'num_instances':
+ num_instance_stat = stat
+ break
+
+ values = {
+ 'stats': dict(num_instances=1)
+ }
+ db.compute_node_update(self.ctxt, item['id'], values, prune_stats=True)
+ item = db.compute_node_get_all(self.ctxt)[0]
+ self.assertEqual(1, len(item['stats']))
+
+ stat = item['stats'][0]
+ self.assertEqual(num_instance_stat['id'], stat['id'])
+ self.assertEqual(num_instance_stat['key'], stat['key'])
+ self.assertEqual(1, int(stat['value']))
class TestIpAllocation(test.TestCase):
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index c84a924e3..5ab059d07 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -1240,7 +1240,7 @@ class XenAPIHostTestCase(stubs.XenAPITestBase):
def test_host_state(self):
stats = self.conn.get_host_stats()
- self.assertEquals(stats['disk_total'], 10000)
+ self.assertEquals(stats['disk_total'], 40000)
self.assertEquals(stats['disk_used'], 20000)
self.assertEquals(stats['host_memory_total'], 10)
self.assertEquals(stats['host_memory_overhead'], 20)