author     Chris Behrens <cbehrens@codestud.com>  2012-03-07 21:01:47 -0800
committer  Chris Behrens <cbehrens@codestud.com>  2012-03-07 22:27:11 -0800
commit     bee9963beb24408e81d7dca0fe1050eb57bfed47 (patch)
tree       35d7213717b01fcbdca5a38a0d7e45ce69d6ef4d
parent     0193d1253c48c719b7f10bb19505ebb4b52defd3 (diff)
Don't use ec2 IDs in scheduler driver
Fixes bug 949650

Change-Id: I5dddd84a0062a9aac3d608b6483deec0414e4b28
-rw-r--r--  nova/scheduler/driver.py               | 18
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 13
2 files changed, 13 insertions(+), 18 deletions(-)
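In short, the scheduler driver previously converted the instance's integer id to an EC2-style identifier (via ec2utils.id_to_ec2_id) before raising exceptions; after this patch it reports the instance uuid directly. Below is a minimal, self-contained sketch of that pattern using stand-in names (RUNNING, InstanceNotRunning, check_instance_running are illustrative, not Nova's real classes or helpers):

    # Sketch of the uuid-based exception pattern applied by this patch:
    # the check raises with instance_ref['uuid'] directly, with no
    # ec2utils.id_to_ec2_id() conversion step.

    RUNNING = 'running'

    class InstanceNotRunning(Exception):
        def __init__(self, instance_id):
            super(InstanceNotRunning, self).__init__(
                "Instance %s is not running" % instance_id)
            self.instance_id = instance_id

    def check_instance_running(instance_ref):
        """Raise InstanceNotRunning keyed by uuid, not an ec2-style id."""
        if instance_ref['power_state'] != RUNNING:
            raise InstanceNotRunning(instance_id=instance_ref['uuid'])

    if __name__ == '__main__':
        instance = {'id': 31337, 'uuid': 'fake_uuid', 'power_state': 'stopped'}
        try:
            check_instance_running(instance)
        except InstanceNotRunning as exc:
            print(exc)  # message names the uuid, not an ec2 id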
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 5981252ce..2deff42c4 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -21,7 +21,6 @@
Scheduler base class that all Schedulers should inherit from
"""
-from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import vm_states
@@ -249,8 +248,8 @@ class Scheduler(object):
# Checking instance is running.
if instance_ref['power_state'] != power_state.RUNNING:
- instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
- raise exception.InstanceNotRunning(instance_id=instance_id)
+ raise exception.InstanceNotRunning(
+ instance_id=instance_ref['uuid'])
# Checking volume node is running when any volumes are mounted
# to the instance.
@@ -291,9 +290,8 @@ class Scheduler(object):
# and dest is not same.
src = instance_ref['host']
if dest == src:
- instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
- raise exception.UnableToMigrateToSelf(instance_id=instance_id,
- host=dest)
+ raise exception.UnableToMigrateToSelf(
+ instance_id=instance_ref['uuid'], host=dest)
# Checking dest host still has enough capacity.
self.assert_compute_node_has_enough_resources(context,
@@ -417,8 +415,8 @@ class Scheduler(object):
mem_inst = instance_ref['memory_mb']
avail = avail - used
if avail <= mem_inst:
- instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
- reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
+ instance_uuid = instance_ref['uuid']
+ reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
@@ -473,8 +471,8 @@ class Scheduler(object):
# Check that available disk > necessary disk
if (available - necessary) < 0:
- instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
- reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
+ instance_uuid = instance_ref['uuid']
+ reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of disk(host:%(available)s "
"<= instance:%(necessary)s)")
raise exception.MigrationError(reason=reason % locals())
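For reference, the reason strings above keep the existing idiom of named %()s placeholders resolved from local variables via locals() when MigrationError is raised. A small standalone illustration with made-up values (build_migration_reason is a hypothetical helper, not part of the patch):

    # Illustration of the reason-string idiom used above: the %(...)s
    # placeholders are filled from the local variables in scope at the
    # point of the "reason % locals()" call.

    def build_migration_reason(instance_ref, dest, avail, mem_inst):
        instance_uuid = instance_ref['uuid']
        reason = ("Unable to migrate %(instance_uuid)s to %(dest)s: "
                  "Lack of memory(host:%(avail)s <= "
                  "instance:%(mem_inst)s)")
        return reason % locals()

    print(build_migration_reason({'uuid': 'fake_uuid'}, 'fake_host2', 512, 1024))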
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index b92df7872..1bfc120d5 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -415,7 +415,9 @@ class SchedulerTestCase(test.TestCase):
def _live_migration_instance(self):
volume1 = {'id': 31338}
volume2 = {'id': 31339}
- return {'id': 31337, 'name': 'fake-instance',
+ return {'id': 31337,
+ 'uuid': 'fake_uuid',
+ 'name': 'fake-instance',
'host': 'fake_host1',
'volumes': [volume1, volume2],
'power_state': power_state.RUNNING,
@@ -575,15 +577,10 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
- c = False
- try:
- self.driver.schedule_live_migration(self.context,
+ self.assertRaises(exception.InstanceNotRunning,
+ self.driver.schedule_live_migration, self.context,
instance_id=instance['id'], dest=dest,
block_migration=block_migration)
- self._test_scheduler_live_migration(options)
- except exception.Invalid, e:
- c = (str(e).find('is not running') > 0)
- self.assertTrue(c)
def test_live_migration_volume_node_not_alive(self):
"""Raise exception when volume node is not alive."""