summaryrefslogtreecommitdiffstats
path: root/nova/tests
diff options
context:
space:
mode:
Diffstat (limited to 'nova/tests')
-rw-r--r--nova/tests/api/ec2/test_cloud.py21
-rw-r--r--nova/tests/api/openstack/v2/test_servers.py5
-rw-r--r--nova/tests/scheduler/test_scheduler.py57
-rw-r--r--nova/tests/test_compute.py208
4 files changed, 260 insertions, 31 deletions
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 78e7f96fe..039282147 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1418,9 +1418,10 @@ class CloudTestCase(test.TestCase):
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
- # a running instance can't be started. It is just ignored.
- result = self.cloud.start_instances(self.context, [instance_id])
- self.assertTrue(result)
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
@@ -1469,9 +1470,10 @@ class CloudTestCase(test.TestCase):
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
- # a running instance can't be started. It is just ignored.
- result = self.cloud.start_instances(self.context, [instance_id])
- self.assertTrue(result)
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertTrue(result)
@@ -1483,9 +1485,10 @@ class CloudTestCase(test.TestCase):
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
- # a running instance can't be started. It is just ignored.
- result = self.cloud.start_instances(self.context, [instance_id])
- self.assertTrue(result)
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
result = self.cloud.reboot_instances(self.context, [instance_id])
self.assertTrue(result)
diff --git a/nova/tests/api/openstack/v2/test_servers.py b/nova/tests/api/openstack/v2/test_servers.py
index f6f78609d..258928eae 100644
--- a/nova/tests/api/openstack/v2/test_servers.py
+++ b/nova/tests/api/openstack/v2/test_servers.py
@@ -1182,6 +1182,11 @@ class ServersControllerTest(test.TestCase):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(
'/v2/fake/servers/%s/action' % FAKE_UUID)
+ # Assume the instance is in ACTIVE state before calling reboot
+ self.stubs.Set(nova.db, 'instance_get',
+ return_server_with_state(vm_states.ACTIVE))
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_with_state(vm_states.ACTIVE))
self.controller.action(req, FAKE_UUID, body)
self.test_server_actions()
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 32ff67f1d..c0fce0fe3 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -21,6 +21,7 @@ Tests For Scheduler
import datetime
import mox
+import stubout
from novaclient import v1_1 as novaclient
from novaclient import exceptions as novaclient_exceptions
@@ -38,9 +39,9 @@ from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler.simple import SimpleScheduler
from nova.compute import power_state
+from nova.compute import task_states
from nova.compute import vm_states
-
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
@@ -143,6 +144,10 @@ class SchedulerTestCase(test.TestCase):
driver = 'nova.tests.scheduler.test_scheduler.TestDriver'
self.flags(scheduler_driver=driver)
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(SchedulerTestCase, self).tearDown()
+
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
ctxt = context.get_admin_context()
@@ -205,6 +210,41 @@ class SchedulerTestCase(test.TestCase):
return False
return True
+ def _assert_state(self, state_dict):
+ """assert the instance is in the state defined by state_dict"""
+ instances = db.instance_get_all(context.get_admin_context())
+ self.assertEqual(len(instances), 1)
+
+ if 'vm_state' in state_dict:
+ self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
+ if 'task_state' in state_dict:
+ self.assertEqual(state_dict['task_state'],
+ instances[0]['task_state'])
+ if 'power_state' in state_dict:
+ self.assertEqual(state_dict['power_state'],
+ instances[0]['power_state'])
+
+ def test_no_valid_host_exception_on_start(self):
+ """check the vm goes to ERROR state if the scheduler fails.
+
+ If the scheduler driver cannot allocate a host for the VM during
+ start_instance, it will raise a NoValidHost exception. In this
+ scenario, we have to make sure that the VM state is set to ERROR.
+ """
+ def NoValidHost_raiser(context, topic, *args, **kwargs):
+ raise exception.NoValidHost(_("Test NoValidHost exception"))
+ scheduler = manager.SchedulerManager()
+ ins_ref = _create_instance(task_state=task_states.STARTING,
+ vm_state=vm_states.STOPPED)
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(TestDriver, 'schedule', NoValidHost_raiser)
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ ctxt = context.get_admin_context()
+ scheduler.start_instance(ctxt, 'topic', instance_id=ins_ref['id'])
+ # assert that the instance goes to ERROR state
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.STARTING})
+
def test_show_host_resources_no_project(self):
"""No instance are running on the given host."""
@@ -247,21 +287,6 @@ class SchedulerTestCase(test.TestCase):
db.instance_destroy(ctxt, i_ref1['id'])
db.instance_destroy(ctxt, i_ref2['id'])
- def test_exception_puts_instance_in_error_state(self):
- """Test that an exception from the scheduler puts an instance
- in the ERROR state."""
-
- scheduler = manager.SchedulerManager()
- ctxt = context.get_admin_context()
- inst = _create_instance()
- self.assertRaises(Exception, scheduler._schedule,
- 'failing_method', ctxt, 'scheduler',
- instance_id=inst['uuid'])
-
- # Refresh the instance
- inst = db.instance_get(ctxt, inst['id'])
- self.assertEqual(inst['vm_state'], vm_states.ERROR)
-
class SimpleDriverTestCase(test.TestCase):
"""Test case for simple driver"""
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index a0517934e..abe21db91 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -19,7 +19,6 @@
"""
Tests For Compute
"""
-
from copy import copy
from webob import exc
@@ -30,6 +29,7 @@ from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
+from nova.compute import state_checker
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
@@ -111,6 +111,8 @@ class BaseTestCase(test.TestCase):
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
test_notifier.NOTIFICATIONS = []
+ self.mox = mox.Mox()
+ self.total_waits = 0
def fake_show(meh, context, id):
return {'id': 1, 'min_disk': None, 'min_ram': None,
@@ -120,7 +122,14 @@ class BaseTestCase(test.TestCase):
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
- def _create_fake_instance(self, params=None):
+ def tearDown(self):
+ self.mox.UnsetStubs()
+ instances = db.instance_get_all(self.context.elevated())
+ for instance in instances:
+ db.instance_destroy(self.context.elevated(), instance['id'])
+ super(BaseTestCase, self).tearDown()
+
+ def _create_fake_instance(self, params=None, type_name='m1.tiny'):
"""Create a test instance"""
if not params:
params = {}
@@ -131,12 +140,16 @@ class BaseTestCase(test.TestCase):
inst['launch_time'] = '10'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
- type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ type_id = instance_types.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst.update(params)
return db.instance_create(self.context, inst)
+ def _create_instance(self, params=None, type_name='m1.tiny'):
+ """Create a test instance. Returns uuid"""
+ return self._create_fake_instance(params, type_name=type_name)['uuid']
+
def _create_instance_type(self, params=None):
"""Create a test instance type"""
if not params:
@@ -195,6 +208,77 @@ class ComputeTestCase(BaseTestCase):
finally:
db.instance_destroy(self.context, instance['id'])
+ def _assert_state(self, state_dict):
+ """assert the instance is in the state defined by state_dict"""
+ instances = db.instance_get_all(context.get_admin_context())
+ self.assertEqual(len(instances), 1)
+
+ if 'vm_state' in state_dict:
+ self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
+ if 'task_state' in state_dict:
+ self.assertEqual(state_dict['task_state'],
+ instances[0]['task_state'])
+ if 'power_state' in state_dict:
+ self.assertEqual(state_dict['power_state'],
+ instances[0]['power_state'])
+
+ def test_fail_to_schedule_persists(self):
+ """check the persistence of the ERROR(scheduling) state"""
+ self._create_instance(params={'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SCHEDULING})
+ #check state is failed even after the periodic poll
+ error_list = self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SCHEDULING})
+
+ def test_run_instance_setup_block_device_mapping_fail(self):
+ """ block device mapping failure test.
+
+ Make sure that when there is a block device mapping problem,
+ the instance goes to ERROR state, keeping the task state
+ """
+ def fake(*args, **kwargs):
+ raise Exception("Failed to block device mapping")
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ '_setup_block_device_mapping', fake)
+ instance_uuid = self._create_instance()
+ self.assertRaises(Exception, self.compute.run_instance,
+ self.context, instance_uuid)
+ #check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.BLOCK_DEVICE_MAPPING})
+ error_list = self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.BLOCK_DEVICE_MAPPING})
+
+ def test_run_instance_spawn_fail(self):
+ """ spawn failure test.
+
+ Make sure that when there is a spawning problem,
+ the instance goes to ERROR state, keeping the task state"""
+ def fake(*args, **kwargs):
+ raise Exception("Failed to spawn")
+ self.stubs.Set(self.compute.driver, 'spawn', fake)
+ instance_uuid = self._create_instance()
+ self.assertRaises(Exception, self.compute.run_instance,
+ self.context, instance_uuid)
+ #check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SPAWNING})
+ error_list = self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SPAWNING})
+
+ def test_can_terminate_on_error_state(self):
+ """Make sure that the instance can be terminated in ERROR state"""
+ elevated = context.get_admin_context()
+ #check failed to schedule --> terminate
+ instance_uuid = self._create_instance(params={'vm_state':
+ vm_states.ERROR})
+ self.compute.terminate_instance(self.context, instance_uuid)
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ elevated, instance_uuid)
+
def test_run_terminate(self):
"""Make sure it is possible to run and terminate instance"""
instance = self._create_fake_instance()
@@ -1157,6 +1241,108 @@ class ComputeAPITestCase(BaseTestCase):
'properties': {'kernel_id': 1, 'ramdisk_id': 1},
}
+ def test_check_vm_state_filtered_function(self):
+ """Test the check_vm_state mechanism for filtered functions.
+
+ Checks that the filtered_function is correctly filtered
+ in the right states only for the api_check_vm_states flag set to True.
+
+ Note that the filtered_function takes the same number of arguments
+ as the real functions that are decorated in the compute api.
+ """
+ @compute.api.check_vm_state('filtered_function')
+ def filtered_function(api, context, instance_ref):
+ LOG.debug("filtered_function executed")
+ return True
+
+ def filtered_assume_right_state(instance_ref):
+ self.flags(api_check_vm_states=True)
+ self.assertTrue(filtered_function(self.compute_api,
+ self.context, instance_ref))
+
+ def filtered_assume_wrong_state(instance_ref):
+ self.flags(api_check_vm_states=True)
+ self.assertRaises(exception.InstanceInvalidState,
+ filtered_function, self.compute_api,
+ self.context, instance_ref)
+ self.flags(api_check_vm_states=False)
+ self.assertTrue(filtered_function(self.compute_api,
+ self.context, instance_ref))
+
+ # check that the filtered_function is correctly filtered
+ self._execute_allowed_and_blocked('filtered_function',
+ filtered_assume_right_state,
+ filtered_assume_wrong_state)
+
+ def test_check_vm_state_non_filtered_function(self):
+ """Test the check_vm_state mechanism for non filtered functions.
+
+ Checks that a function that is decorated with check_vm_state
+ but is not defined in any blocked dictionary will always
+ be executed
+ """
+ @compute.api.check_vm_state('non_filtered_function')
+ def non_filtered_function(api, context, instance_ref):
+ LOG.debug("non_filtered_function executed")
+ return True
+
+ def non_filtered_assume_executed(instance_ref):
+ self.flags(api_check_vm_states=True)
+ self.assertTrue(non_filtered_function(self.compute_api,
+ self.context, instance_ref))
+
+ # check that the non_filtered_function is never filtered
+ self._execute_allowed_and_blocked('non_filtered_function',
+ non_filtered_assume_executed,
+ non_filtered_assume_executed)
+
+ def _execute_allowed_and_blocked(self, func_name, f_allowed, f_blocked):
+ """Execute f_allowed and f_blocked functions for all the scenarios.
+
+ Get an allowed vm_state, a blocked vm_state, an allowed task_state,
+ and a blocked task_state for the function defined by func_name to be
+ executed. Then it executes the function f_allowed or f_blocked
+ accordingly, passing a new instance id as a parameter. These functions
+ have to run the func_name function and assert the expected result
+ """
+
+ # define blocked and allowed states
+ blocked_tsk = task_states.SCHEDULING
+ ok_task = task_states.NETWORKING
+ blocked_vm = vm_states.BUILDING
+ ok_vm = vm_states.RESCUED
+ blocked_comb = {'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE, 'task_state': None}
+ ok_comb = {'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.PAUSED, 'task_state': None}
+
+ # To guarantee a 100% test coverage we create fake lists.
+ fake_block_for_task_state = {'filtered_function': [blocked_tsk]}
+
+ fake_block_for_vm_state = {'filtered_function': [blocked_vm]}
+
+ fake_block_for_combination = {'filtered_function': [blocked_comb]}
+
+ self.stubs.Set(nova.compute.state_checker, 'block_for_task_state',
+ fake_block_for_task_state)
+ self.stubs.Set(nova.compute.state_checker, 'block_for_vm_state',
+ fake_block_for_vm_state)
+ self.stubs.Set(nova.compute.state_checker, 'block_for_combination',
+ fake_block_for_combination)
+
+ i_ref = self._create_fake_instance(params={'task_state': blocked_tsk})
+ f_blocked(i_ref)
+ i_ref = self._create_fake_instance(params={'task_state': ok_task})
+ f_allowed(i_ref)
+ i_ref = self._create_fake_instance(params={'vm_state': blocked_vm})
+ f_blocked(i_ref)
+ i_ref = self._create_fake_instance(params={'vm_state': ok_vm})
+ f_allowed(i_ref)
+ i_ref = self._create_fake_instance(params=blocked_comb)
+ f_blocked(i_ref)
+ i_ref = self._create_fake_instance(params=ok_comb)
+ f_allowed(i_ref)
+
def test_create_with_too_little_ram(self):
"""Test an instance type with too little memory"""
@@ -1416,10 +1602,14 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance_uuid)
def test_resume(self):
- """Ensure instance can be resumed"""
+ """Ensure instance can be resumed (if suspended)"""
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
+ instance_id = instance['id']
self.compute.run_instance(self.context, instance_uuid )
+ db.instance_update(self.context, instance_id,
+ {'vm_state': vm_states.SUSPENDED})
+ instance = db.instance_get(self.context, instance_id)
self.assertEqual(instance['task_state'], None)
@@ -1578,6 +1768,7 @@ class ComputeAPITestCase(BaseTestCase):
params = {'vm_state': vm_states.RESCUED, 'task_state': None}
db.instance_update(self.context, instance_uuid, params)
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.unrescue(self.context, instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@@ -1625,7 +1816,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_update(self.context, instance_uuid, instance_values)
instance = self.compute_api.get(self.context, instance_uuid)
- self.assertRaises(exception.InstanceBackingUp,
+ self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context,
instance,
@@ -1643,7 +1834,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_update(self.context, instance_uuid, instance_values)
instance = self.compute_api.get(self.context, instance_uuid)
- self.assertRaises(exception.InstanceSnapshotting,
+ self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context,
instance,
@@ -1663,6 +1854,11 @@ class ComputeAPITestCase(BaseTestCase):
migration_ref = db.migration_create(context,
{'instance_uuid': instance['uuid'],
'status': 'finished'})
+ # set the state that the instance gets when resize finishes
+ db.instance_update(self.context, instance['uuid'],
+ {'task_state': task_states.RESIZE_VERIFY,
+ 'vm_state': vm_states.ACTIVE})
+ instance = db.instance_get_by_uuid(context, instance['uuid'])
self.compute_api.confirm_resize(context, instance)
self.compute.terminate_instance(context, instance['uuid'])