summaryrefslogtreecommitdiffstats
path: root/nova/tests/scheduler
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2013-02-06 23:39:09 +0000
committerGerrit Code Review <review@openstack.org>2013-02-06 23:39:09 +0000
commit7139b272ec1aeb355727b7180d5f125b89fdca36 (patch)
tree5ac4f8bcb2eff8684aa07386350a736a154a902c /nova/tests/scheduler
parenteaadc9d998922c93a8fa258e7f730f509a45bba9 (diff)
parent3783cf3cc9c571beb9c75e5b0e39bf449520aaf3 (diff)
downloadnova-7139b272ec1aeb355727b7180d5f125b89fdca36.tar.gz
nova-7139b272ec1aeb355727b7180d5f125b89fdca36.tar.xz
nova-7139b272ec1aeb355727b7180d5f125b89fdca36.zip
Merge "Add support for memory overcommit in live-migration"
Diffstat (limited to 'nova/tests/scheduler')
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py138
-rw-r--r--nova/tests/scheduler/test_scheduler.py30
2 files changed, 159 insertions, 9 deletions
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index ff3a00f22..b4d73ec0c 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -19,16 +19,19 @@ Tests For Filter Scheduler.
import mox
from nova.compute import instance_types
+from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
+from nova.openstack.common import rpc
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import weights
+from nova import servicegroup
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
@@ -342,3 +345,138 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual([['host', 'node']],
filter_properties['retry']['hosts'])
+
+ def test_live_migration_dest_check_service_memory_overcommit(self):
+ # Live-migration should work since default is to overcommit memory.
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+ self.mox.StubOutWithMock(self.driver, '_get_compute_info')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+
+ self.driver._live_migration_src_check(self.context, instance)
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
+ self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
+
+ self.driver._get_compute_info(self.context, dest).AndReturn(
+ {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 512,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None})
+
+ self.driver._live_migration_common_check(self.context, instance, dest)
+
+ rpc.call(self.context, "compute.fake_host2",
+ {"method": 'check_can_live_migrate_destination',
+ "args": {'instance': instance,
+ 'block_migration': block_migration,
+ 'disk_over_commit': disk_over_commit},
+ "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
+ None).AndReturn({})
+
+ self.driver.compute_rpcapi.live_migration(self.context,
+ host=instance['host'], instance=instance, dest=dest,
+ block_migration=block_migration, migrate_data={})
+
+ self.mox.ReplayAll()
+ result = self.driver.schedule_live_migration(self.context,
+ instance=instance, dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+ self.assertEqual(result, None)
+
+ def test_live_migration_assert_memory_no_overcommit(self):
+ # Test that memory check passes with no memory overcommit.
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 1024,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=1.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ result = self.driver._assert_compute_node_has_enough_memory(
+ self.context, instance, dest)
+ self.assertEqual(result, None)
+
+ def test_live_migration_assert_memory_no_overcommit_lack_memory(self):
+ # Test that memory check fails with no memory overcommit.
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 1023,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=1.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ self.assertRaises(exception.MigrationError,
+ self.driver._assert_compute_node_has_enough_memory,
+                self.context, instance, dest)
+
+ def test_live_migration_assert_memory_overcommit(self):
+ # Test that memory check passes with memory overcommit.
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': -1024,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=2.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ result = self.driver._assert_compute_node_has_enough_memory(
+ self.context, instance, dest)
+ self.assertEqual(result, None)
+
+ def test_live_migration_assert_memory_overcommit_lack_memory(self):
+ # Test that memory check fails with memory overcommit.
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': -1025,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=2.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ self.assertRaises(exception.MigrationError,
+ self.driver._assert_compute_node_has_enough_memory,
+ self.context, instance, dest)
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 14be14a1a..44e1f3537 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -350,7 +350,8 @@ class SchedulerTestCase(test.TestCase):
'root_gb': 1024,
'ephemeral_gb': 0,
'vm_state': '',
- 'task_state': ''}
+ 'task_state': '',
+ 'instance_type': {'memory_mb': 1024}}
def test_live_migration_basic(self):
# Test basic schedule_live_migration functionality.
@@ -389,9 +390,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(rpc, 'call')
- self.mox.StubOutWithMock(rpc, 'cast')
self.mox.StubOutWithMock(self.driver.compute_rpcapi,
'live_migration')
@@ -412,9 +411,14 @@ class SchedulerTestCase(test.TestCase):
# assert_compute_node_has_enough_memory()
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 1280,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None,
'hypervisor_version': 1}]})
- db.instance_get_all_by_host(self.context, dest).AndReturn(
- [dict(memory_mb=256), dict(memory_mb=512)])
# Common checks (same hypervisor, etc)
db.service_get_by_compute_host(self.context, dest).AndReturn(
@@ -557,11 +561,14 @@ class SchedulerTestCase(test.TestCase):
def test_live_migration_dest_check_service_lack_memory(self):
# Confirms exception raises when dest doesn't have enough memory.
+ # Flag needed to make FilterScheduler test hit memory limit since the
+ # default for it is to allow memory overcommit by a factor of 1.5.
+ self.flags(ram_allocation_ratio=1.0)
+
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
dest = 'fake_host2'
block_migration = False
@@ -574,9 +581,14 @@ class SchedulerTestCase(test.TestCase):
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.driver._get_compute_info(self.context, dest).AndReturn(
- {'memory_mb': 2048})
- db.instance_get_all_by_host(self.context, dest).AndReturn(
- [dict(memory_mb=1024), dict(memory_mb=512)])
+ {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 512,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,