summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMark Washenberger <mark.washenberger@rackspace.com>2011-11-29 16:24:05 -0500
committerMark Washenberger <mark.washenberger@rackspace.com>2011-12-01 10:30:21 -0500
commit0d54770ee109bc7d598539b9238affdd1880997b (patch)
treef15ad7c650e4d799b71092a3c24b2553dbfd47d7
parent22df7020b1d7105586404cf7ec920e6d623cc325 (diff)
downloadnova-0d54770ee109bc7d598539b9238affdd1880997b.tar.gz
nova-0d54770ee109bc7d598539b9238affdd1880997b.tar.xz
nova-0d54770ee109bc7d598539b9238affdd1880997b.zip
Make run_instance only support instance uuids.
Related to blueprint internal-uuids. This patchset also attempts a major overhaul of run_instance so that the code is cleaner and easier to understand (no more global-style variables!) Change-Id: I2289f3c253c6246ea51395b2dcfccee2256a2813
-rw-r--r--nova/compute/manager.py311
-rw-r--r--nova/rpc/impl_fake.py2
-rw-r--r--nova/scheduler/chance.py2
-rw-r--r--nova/scheduler/distributed_scheduler.py2
-rw-r--r--nova/scheduler/driver.py6
-rw-r--r--nova/scheduler/simple.py2
-rw-r--r--nova/tests/scheduler/test_scheduler.py130
-rw-r--r--nova/tests/test_compute.py197
8 files changed, 332 insertions, 320 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 0cbddca3e..71ba69c92 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -151,9 +151,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def _instance_update(self, context, instance_id, **kwargs):
"""Update an instance in the database using kwargs as value."""
- if utils.is_uuid_like(instance_id):
- instance = self.db.instance_get_by_uuid(context, instance_id)
- instance_id = instance['id']
return self.db.instance_update(context, instance_id, kwargs)
def init_host(self):
@@ -320,182 +317,176 @@ class ComputeManager(manager.SchedulerDependentManager):
return (swap, ephemerals, block_device_mapping)
- def _run_instance(self, context, instance_uuid, **kwargs):
+ def _run_instance(self, context, instance_uuid,
+ requested_networks=None,
+ injected_files=[],
+ admin_pass=None,
+ **kwargs):
"""Launch a new instance with specified options."""
- def _check_image_size(image_meta):
- """Ensure image is smaller than the maximum size allowed by the
- instance_type.
+ context = context.elevated()
+ try:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
+ self._check_instance_not_already_created(context, instance)
+ image_meta = self._check_image_size(context, instance)
+ self._start_building(context, instance)
+ network_info = self._allocate_network(context, instance,
+ requested_networks)
+ try:
+ block_device_info = self._prep_block_device(context, instance)
+ instance = self._spawn(context, instance, image_meta,
+ network_info, block_device_info,
+ injected_files, admin_pass)
+ except:
+ self._deallocate_network(context, instance)
+ raise
+ self._notify_about_instance_usage(instance)
+ except exception.InstanceNotFound:
+ LOG.exception(_("Instance %s not found.") % instance_uuid)
+ return # assuming the instance was already deleted
+ except Exception:
+ with utils.save_and_reraise_exception():
+ self._instance_update(context, instance_uuid,
+ vm_state=vm_states.ERROR)
- The image stored in Glance is potentially compressed, so we use two
- checks to ensure that the size isn't exceeded:
+ def _check_instance_not_already_created(self, context, instance):
+ """Ensure an instance with the same name is not already present."""
+ if instance['name'] in self.driver.list_instances():
+ raise exception.Error(_("Instance has already been created"))
- 1) This one - checks compressed size, this a quick check to
- eliminate any images which are obviously too large
+ def _check_image_size(self, context, instance):
+ """Ensure image is smaller than the maximum size allowed by the
+ instance_type.
- 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
- is a slower check since it requires uncompressing the entire
- image, but is accurate because it reflects the image's
- actual size.
- """
+ The image stored in Glance is potentially compressed, so we use two
+ checks to ensure that the size isn't exceeded:
- try:
- size_bytes = image_meta['size']
- except KeyError:
- # Size is not a required field in the image service (yet), so
- # we are unable to rely on it being there even though it's in
- # glance.
-
- # TODO(jk0): Should size be required in the image service?
- return
-
- instance_type_id = instance['instance_type_id']
- instance_type = instance_types.get_instance_type(instance_type_id)
- allowed_size_gb = instance_type['local_gb']
-
- # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
- # need to handle potential situations where local_gb is 0. This is
- # the default for m1.tiny.
- if allowed_size_gb == 0:
- return
-
- allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
-
- image_id = image_meta['id']
- LOG.debug(_("image_id=%(image_id)s, image_size_bytes="
- "%(size_bytes)d, allowed_size_bytes="
- "%(allowed_size_bytes)d") % locals())
-
- if size_bytes > allowed_size_bytes:
- LOG.info(_("Image '%(image_id)s' size %(size_bytes)d exceeded"
- " instance_type allowed size "
- "%(allowed_size_bytes)d")
- % locals())
- raise exception.ImageTooLarge()
-
- def _make_network_info():
- if FLAGS.stub_network:
- # TODO(tr3buchet) not really sure how this should be handled.
- # virt requires network_info to be passed in but stub_network
- # is enabled. Setting to [] for now will cause virt to skip
- # all vif creation and network injection, maybe this is correct
- network_info = []
- else:
- # NOTE(vish): This could be a cast because we don't do
- # anything with the address currently, but I'm leaving it as a
- # call to ensure that network setup completes. We will
- # eventually also need to save the address here.
- network_info = self.network_api.allocate_for_instance(context,
- instance, vpn=is_vpn,
- requested_networks=requested_networks)
- LOG.debug(_("instance network_info: |%s|"), network_info)
- return network_info
-
- def _make_block_device_info():
- (swap, ephemerals,
- block_device_mapping) = self._setup_block_device_mapping(
- context, instance)
- block_device_info = {
- 'root_device_name': instance['root_device_name'],
- 'swap': swap,
- 'ephemerals': ephemerals,
- 'block_device_mapping': block_device_mapping}
- return block_device_info
+ 1) This one - checks compressed size, this a quick check to
+ eliminate any images which are obviously too large
- def _deallocate_network():
- if not FLAGS.stub_network:
- LOG.debug(_("deallocating network for instance: %s"),
- instance['id'])
- self.network_api.deallocate_for_instance(context,
- instance)
+ 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
+ is a slower check since it requires uncompressing the entire
+ image, but is accurate because it reflects the image's
+ actual size.
+ """
+ image_meta = _get_image_meta(context, instance['image_ref'])
- def _cleanup():
- with utils.save_and_reraise_exception():
- self._instance_update(context,
- instance_id,
- vm_state=vm_states.ERROR)
- if network_info is not None:
- _deallocate_network()
+ try:
+ size_bytes = image_meta['size']
+ except KeyError:
+ # Size is not a required field in the image service (yet), so
+ # we are unable to rely on it being there even though it's in
+ # glance.
- def _error_message(instance_uuid, message):
- return _("Instance '%(instance_uuid)s' "
- "failed %(message)s.") % locals()
+ # TODO(jk0): Should size be required in the image service?
+ return
- context = context.elevated()
- if utils.is_uuid_like(instance_uuid):
- instance = self.db.instance_get_by_uuid(context, instance_uuid)
- instance_id = instance['id']
- else:
- instance_id = instance_uuid
- instance = self.db.instance_get(context, instance_id)
- instance_uuid = instance['uuid']
+ instance_type_id = instance['instance_type_id']
+ instance_type = instance_types.get_instance_type(instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+
+ # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
+ # need to handle potential situations where local_gb is 0. This is
+ # the default for m1.tiny.
+ if allowed_size_gb == 0:
+ return
- requested_networks = kwargs.get('requested_networks', None)
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
- if instance['name'] in self.driver.list_instances():
- raise exception.Error(_("Instance has already been created"))
+ image_id = image_meta['id']
+ LOG.debug(_("image_id=%(image_id)s, image_size_bytes="
+ "%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
- image_meta = _get_image_meta(context, instance['image_ref'])
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image '%(image_id)s' size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
- _check_image_size(image_meta)
+ return image_meta
- LOG.audit(_("instance %s: starting..."), instance_uuid,
+ def _start_building(self, context, instance):
+ """Save the host and launched_on fields and log appropriately."""
+ LOG.audit(_("instance %s: starting..."), instance['uuid'],
context=context)
- updates = {}
- updates['host'] = self.host
- updates['launched_on'] = self.host
- updates['vm_state'] = vm_states.BUILDING
- updates['task_state'] = task_states.NETWORKING
- instance = self.db.instance_update(context, instance_id, updates)
- instance['injected_files'] = kwargs.get('injected_files', [])
- instance['admin_pass'] = kwargs.get('admin_password', None)
+ self._instance_update(context, instance['uuid'],
+ host=self.host, launched_on=self.host,
+ vm_state=vm_states.BUILDING,
+ task_state=None)
+ def _allocate_network(self, context, instance, requested_networks):
+ """Allocate networks for an instance and return the network info"""
+ if FLAGS.stub_network:
+ msg = _("Skipping network allocation for instance %s")
+ LOG.debug(msg % instance['uuid'])
+ return []
+ self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.NETWORKING)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
- network_info = None
- with utils.logging_error(_error_message(instance_id,
- "network setup")):
- network_info = _make_network_info()
+ network_info = self.network_api.allocate_for_instance(
+ context, instance, vpn=is_vpn,
+ requested_networks=requested_networks)
+ except:
+ msg = _("Instance %s failed network setup")
+ LOG.exception(msg % instance['uuid'])
+ raise
+ LOG.debug(_("instance network_info: |%s|"), network_info)
+ return network_info
- self._instance_update(context,
- instance_uuid,
- vm_state=vm_states.BUILDING,
- task_state=task_states.BLOCK_DEVICE_MAPPING)
- with utils.logging_error(_error_message(instance_uuid,
- "block device setup")):
- block_device_info = _make_block_device_info()
+ def _prep_block_device(self, context, instance):
+ """Set up the block device for an instance with error logging"""
+ self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+ try:
+ mapping = self._setup_block_device_mapping(context, instance)
+ swap, ephemerals, block_device_mapping = mapping
+ except:
+ msg = _("Instance %s failed block device setup")
+ LOG.exception(msg % instance['uuid'])
+ raise
+ return {'root_device_name': instance['root_device_name'],
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
- self._instance_update(context,
- instance_uuid,
- vm_state=vm_states.BUILDING,
- task_state=task_states.SPAWNING)
-
- # TODO(vish) check to make sure the availability zone matches
- with utils.logging_error(_error_message(instance_uuid,
- "failed to spawn")):
- self.driver.spawn(context, instance, image_meta,
- network_info, block_device_info)
-
- current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context,
- instance_uuid,
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- launched_at=utils.utcnow())
-
- usage_info = utils.usage_from_instance(instance)
- notifier.notify('compute.%s' % self.host,
- 'compute.instance.create',
- notifier.INFO, usage_info)
+ def _spawn(self, context, instance, image_meta, network_info,
+ block_device_info, injected_files, admin_pass):
+ """Spawn an instance with error logging and update its power state"""
+ self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.SPAWNING)
+ instance['injected_files'] = injected_files
+ instance['admin_pass'] = admin_pass
+ try:
+ self.driver.spawn(context, instance, image_meta,
+ network_info, block_device_info)
+ except:
+ msg = _("Instance %s failed to spawn")
+ LOG.exception(msg % instance['uuid'])
+ raise
- except exception.InstanceNotFound:
- # FIXME(wwolf): We are just ignoring InstanceNotFound
- # exceptions here in case the instance was immediately
- # deleted before it actually got created. This should
- # be fixed once we have no-db-messaging
- pass
- except Exception:
- _cleanup()
+ current_power_state = self._get_power_state(context, instance)
+ return self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
+
+ def _notify_about_instance_usage(self, instance):
+ usage_info = utils.usage_from_instance(instance)
+ notifier.notify('compute.%s' % self.host,
+ 'compute.instance.create',
+ notifier.INFO, usage_info)
+
+ def _deallocate_network(self, context, instance):
+ if not FLAGS.stub_network:
+ msg = _("deallocating network for instance: %s")
+ LOG.debug(msg % instance['uuid'])
+ self.network_api.deallocate_for_instance(context, instance)
def _get_instance_volume_bdms(self, context, instance_id):
bdms = self.db.block_device_mapping_get_all_by_instance(context,
@@ -515,8 +506,8 @@ class ComputeManager(manager.SchedulerDependentManager):
return {'block_device_mapping': block_device_mapping}
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- def run_instance(self, context, instance_id, **kwargs):
- self._run_instance(context, instance_id, **kwargs)
+ def run_instance(self, context, instance_uuid, **kwargs):
+ self._run_instance(context, instance_uuid, **kwargs)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock_uuid
diff --git a/nova/rpc/impl_fake.py b/nova/rpc/impl_fake.py
index 9a94be07e..4038c4961 100644
--- a/nova/rpc/impl_fake.py
+++ b/nova/rpc/impl_fake.py
@@ -69,7 +69,7 @@ class Consumer(object):
exc_info = sys.exc_info()
raise rpc_common.RemoteError(exc_info[0].__name__,
str(exc_info[1]),
- traceback.format_exception(*exc_info))
+ ''.join(traceback.format_exception(*exc_info)))
class Connection(object):
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 7882a06c8..fe47e4f54 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -72,7 +72,7 @@ class ChanceScheduler(driver.Scheduler):
host = self._schedule(context, 'compute', request_spec, **kwargs)
instance = self.create_instance_db_entry(elevated, request_spec)
driver.cast_to_compute_host(context, host,
- 'run_instance', instance_id=instance['id'], **kwargs)
+ 'run_instance', instance_uuid=instance['uuid'], **kwargs)
instances.append(driver.encode_instance(instance))
return instances
diff --git a/nova/scheduler/distributed_scheduler.py b/nova/scheduler/distributed_scheduler.py
index 74b97f1fb..888594731 100644
--- a/nova/scheduler/distributed_scheduler.py
+++ b/nova/scheduler/distributed_scheduler.py
@@ -172,7 +172,7 @@ class DistributedScheduler(driver.Scheduler):
"""Create the requested resource in this Zone."""
instance = self.create_instance_db_entry(context, request_spec)
driver.cast_to_compute_host(context, weighted_host.host,
- 'run_instance', instance_id=instance['id'], **kwargs)
+ 'run_instance', instance_uuid=instance['uuid'], **kwargs)
return driver.encode_instance(instance, local=True)
def _make_weighted_host_from_blob(self, blob):
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 5041c3346..8c7945200 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -59,10 +59,12 @@ def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
"""Cast request to a compute host queue"""
if update_db:
+ # fall back on the id if the uuid is not present
instance_id = kwargs.get('instance_id', None)
- if instance_id is not None:
+ instance_uuid = kwargs.get('instance_uuid', instance_id)
+ if instance_uuid is not None:
now = utils.utcnow()
- db.instance_update(context, instance_id,
+ db.instance_update(context, instance_uuid,
{'host': host, 'scheduled_at': now})
rpc.cast(context,
db.queue_get_for(context, 'compute', host),
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 8f993d9df..050edf3f5 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -78,7 +78,7 @@ class SimpleScheduler(chance.ChanceScheduler):
instance_ref = self.create_instance_db_entry(context,
request_spec)
driver.cast_to_compute_host(context, host, 'run_instance',
- instance_id=instance_ref['id'], **_kwargs)
+ instance_uuid=instance_ref['uuid'], **_kwargs)
instances.append(driver.encode_instance(instance_ref))
return instances
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index c1ab919ea..9938b5dd9 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -110,8 +110,8 @@ def _fake_cast_to_volume_host(context, host, method, **kwargs):
def _fake_create_instance_db_entry(simple_self, context, request_spec):
instance = _create_instance_from_spec(request_spec)
- global instance_ids
- instance_ids.append((instance['id'], instance['uuid']))
+ global instance_uuids
+ instance_uuids.append(instance['uuid'])
return instance
@@ -313,8 +313,8 @@ class SimpleDriverTestCase(test.TestCase):
compute1.start()
_create_instance()
ctxt = context.RequestContext('fake', 'fake', False)
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
self.stubs.Set(driver,
@@ -378,11 +378,11 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute2.start()
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
instance = _create_instance()
- instance_ids.append((instance['id'], instance['uuid']))
- compute1.run_instance(self.context, instance_ids[0][0])
+ instance_uuids.append(instance['uuid'])
+ compute1.run_instance(self.context, instance_uuids[0])
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
@@ -396,12 +396,12 @@ class SimpleDriverTestCase(test.TestCase):
self.context, request_spec)
self.assertEqual(_picked_host, 'host2')
- self.assertEqual(len(instance_ids), 2)
+ self.assertEqual(len(instance_uuids), 2)
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0].get('_is_precooked', False), False)
- compute1.terminate_instance(self.context, instance_ids[0][1])
- compute2.terminate_instance(self.context, instance_ids[1][1])
+ compute1.terminate_instance(self.context, instance_uuids[0])
+ compute2.terminate_instance(self.context, instance_uuids[1])
compute1.kill()
compute2.kill()
@@ -418,11 +418,11 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute2.start()
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
instance = _create_instance()
- instance_ids.append((instance['id'], instance['uuid']))
- compute1.run_instance(self.context, instance_ids[0][0])
+ instance_uuids.append(instance['uuid'])
+ compute1.run_instance(self.context, instance_uuids[0])
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
@@ -434,10 +434,10 @@ class SimpleDriverTestCase(test.TestCase):
request_spec = _create_request_spec(availability_zone='nova:host1')
self.scheduler.driver.schedule_run_instance(self.context, request_spec)
self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_ids), 2)
+ self.assertEqual(len(instance_uuids), 2)
- compute1.terminate_instance(self.context, instance_ids[0][1])
- compute1.terminate_instance(self.context, instance_ids[1][1])
+ compute1.terminate_instance(self.context, instance_uuids[0])
+ compute1.terminate_instance(self.context, instance_uuids[1])
compute1.kill()
compute2.kill()
@@ -453,8 +453,8 @@ class SimpleDriverTestCase(test.TestCase):
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
global _picked_host
@@ -478,8 +478,8 @@ class SimpleDriverTestCase(test.TestCase):
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
global _picked_host
@@ -490,8 +490,8 @@ class SimpleDriverTestCase(test.TestCase):
request_spec = _create_request_spec(availability_zone='nova:host1')
self.scheduler.driver.schedule_run_instance(self.context, request_spec)
self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_ids), 1)
- compute1.terminate_instance(self.context, instance_ids[0][1])
+ self.assertEqual(len(instance_uuids), 1)
+ compute1.terminate_instance(self.context, instance_uuids[0])
compute1.kill()
def test_specific_zone_gets_instance_no_queue(self):
@@ -509,11 +509,11 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute2.start()
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
instance = _create_instance()
- instance_ids.append((instance['id'], instance['uuid']))
- compute1.run_instance(self.context, instance_ids[0][0])
+ instance_uuids.append(instance['uuid'])
+ compute1.run_instance(self.context, instance_uuids[0])
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
@@ -525,10 +525,10 @@ class SimpleDriverTestCase(test.TestCase):
request_spec = _create_request_spec(availability_zone='zone1')
self.scheduler.driver.schedule_run_instance(self.context, request_spec)
self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_ids), 2)
+ self.assertEqual(len(instance_uuids), 2)
- compute1.terminate_instance(self.context, instance_ids[0][1])
- compute1.terminate_instance(self.context, instance_ids[1][1])
+ compute1.terminate_instance(self.context, instance_uuids[0])
+ compute1.terminate_instance(self.context, instance_uuids[1])
compute1.kill()
compute2.kill()
@@ -578,23 +578,23 @@ class SimpleDriverTestCase(test.TestCase):
'compute',
FLAGS.compute_manager)
compute2.start()
- instance_ids1 = []
- instance_ids2 = []
+ instance_uuids1 = []
+ instance_uuids2 = []
for index in xrange(FLAGS.max_cores):
instance = _create_instance()
- compute1.run_instance(self.context, instance['id'])
- instance_ids1.append((instance['id'], instance['uuid']))
+ compute1.run_instance(self.context, instance['uuid'])
+ instance_uuids1.append(instance['uuid'])
instance = _create_instance()
- compute2.run_instance(self.context, instance['id'])
- instance_ids2.append((instance['id'], instance['uuid']))
+ compute2.run_instance(self.context, instance['uuid'])
+ instance_uuids2.append(instance['uuid'])
request_spec = _create_request_spec()
self.assertRaises(exception.NoValidHost,
self.scheduler.driver.schedule_run_instance,
self.context,
request_spec)
- for (_, instance_uuid) in instance_ids1:
+ for instance_uuid in instance_uuids1:
compute1.terminate_instance(self.context, instance_uuid)
- for (_, instance_uuid) in instance_ids2:
+ for instance_uuid in instance_uuids2:
compute2.terminate_instance(self.context, instance_uuid)
compute1.kill()
compute2.kill()
@@ -653,11 +653,11 @@ class SimpleDriverTestCase(test.TestCase):
compute1 = self.start_service('compute', host='host1')
compute2 = self.start_service('compute', host='host2')
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
instance = _create_instance()
- instance_ids.append((instance['id'], instance['uuid']))
- compute1.run_instance(self.context, instance_ids[0][0])
+ instance_uuids.append(instance['uuid'])
+ compute1.run_instance(self.context, instance_uuids[0])
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
@@ -669,10 +669,10 @@ class SimpleDriverTestCase(test.TestCase):
request_spec = _create_request_spec()
self.scheduler.driver.schedule_run_instance(self.context, request_spec)
self.assertEqual(_picked_host, 'host2')
- self.assertEqual(len(instance_ids), 2)
+ self.assertEqual(len(instance_uuids), 2)
- compute1.terminate_instance(self.context, instance_ids[0][1])
- compute2.terminate_instance(self.context, instance_ids[1][1])
+ compute1.terminate_instance(self.context, instance_uuids[0])
+ compute2.terminate_instance(self.context, instance_uuids[1])
compute1.kill()
compute2.kill()
@@ -681,11 +681,11 @@ class SimpleDriverTestCase(test.TestCase):
compute1 = self.start_service('compute', host='host1')
compute2 = self.start_service('compute', host='host2')
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
instance = _create_instance()
- instance_ids.append((instance['id'], instance['uuid']))
- compute1.run_instance(self.context, instance_ids[0][0])
+ instance_uuids.append(instance['uuid'])
+ compute1.run_instance(self.context, instance_uuids[0])
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
@@ -697,10 +697,10 @@ class SimpleDriverTestCase(test.TestCase):
request_spec = _create_request_spec(availability_zone='nova:host1')
self.scheduler.driver.schedule_run_instance(self.context, request_spec)
self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_ids), 2)
+ self.assertEqual(len(instance_uuids), 2)
- compute1.terminate_instance(self.context, instance_ids[0][1])
- compute1.terminate_instance(self.context, instance_ids[1][1])
+ compute1.terminate_instance(self.context, instance_uuids[0])
+ compute1.terminate_instance(self.context, instance_uuids[1])
compute1.kill()
compute2.kill()
@@ -723,8 +723,8 @@ class SimpleDriverTestCase(test.TestCase):
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
- global instance_ids
- instance_ids = []
+ global instance_uuids
+ instance_uuids = []
self.stubs.Set(SimpleScheduler,
'create_instance_db_entry', _fake_create_instance_db_entry)
global _picked_host
@@ -735,23 +735,23 @@ class SimpleDriverTestCase(test.TestCase):
request_spec = _create_request_spec(availability_zone='nova:host1')
self.scheduler.driver.schedule_run_instance(self.context, request_spec)
self.assertEqual(_picked_host, 'host1')
- self.assertEqual(len(instance_ids), 1)
- compute1.terminate_instance(self.context, instance_ids[0][1])
+ self.assertEqual(len(instance_uuids), 1)
+ compute1.terminate_instance(self.context, instance_uuids[0])
compute1.kill()
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
compute1 = self.start_service('compute', host='host1')
compute2 = self.start_service('compute', host='host2')
- instance_ids1 = []
- instance_ids2 = []
+ instance_uuids1 = []
+ instance_uuids2 = []
for index in xrange(FLAGS.max_cores):
instance = _create_instance()
- compute1.run_instance(self.context, instance['id'])
- instance_ids1.append((instance['id'], instance['uuid']))
+ compute1.run_instance(self.context, instance['uuid'])
+ instance_uuids1.append(instance['uuid'])
instance = _create_instance()
- compute2.run_instance(self.context, instance['id'])
- instance_ids2.append((instance['id'], instance['uuid']))
+ compute2.run_instance(self.context, instance['uuid'])
+ instance_uuids2.append(instance['uuid'])
def _create_instance_db_entry(simple_self, context, request_spec):
self.fail(_("Shouldn't try to create DB entry when at "
@@ -770,9 +770,9 @@ class SimpleDriverTestCase(test.TestCase):
self.scheduler.driver.schedule_run_instance,
self.context,
request_spec)
- for (_, instance_uuid) in instance_ids1:
+ for instance_uuid in instance_uuids1:
compute1.terminate_instance(self.context, instance_uuid)
- for (_, instance_uuid) in instance_ids2:
+ for instance_uuid in instance_uuids2:
compute2.terminate_instance(self.context, instance_uuid)
compute1.kill()
compute2.kill()
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 0b1879ed1..a193c7e49 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -171,36 +171,38 @@ class ComputeTestCase(BaseTestCase):
def test_create_instance_with_img_ref_associates_config_drive(self):
"""Make sure create associates a config drive."""
- instance_id = self._create_instance(params={'config_drive': '1234', })
+ instance = self._create_fake_instance(
+ params={'config_drive': '1234', })
try:
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
instances = db.instance_get_all(context.get_admin_context())
instance = instances[0]
self.assertTrue(instance.config_drive)
finally:
- db.instance_destroy(self.context, instance_id)
+ db.instance_destroy(self.context, instance['id'])
def test_create_instance_associates_config_drive(self):
"""Make sure create associates a config drive."""
- instance_id = self._create_instance(params={'config_drive': True, })
+ instance = self._create_fake_instance(
+ params={'config_drive': '1234', })
try:
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
instances = db.instance_get_all(context.get_admin_context())
instance = instances[0]
self.assertTrue(instance.config_drive)
finally:
- db.instance_destroy(self.context, instance_id)
+ db.instance_destroy(self.context, instance['id'])
def test_run_terminate(self):
"""Make sure it is possible to run and terminate instance"""
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
@@ -219,7 +221,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
launch = utils.utcnow()
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
instance = db.instance_get(self.context, instance_id)
self.assert_(instance['launched_at'] > launch)
self.assertEqual(instance['deleted_at'], None)
@@ -235,7 +237,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.stop_instance(self.context, instance_uuid)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -244,7 +246,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.stop_instance(self.context, instance_uuid)
self.compute.start_instance(self.context, instance_uuid)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -269,7 +271,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.rescue_instance(self.context, instance_uuid)
self.assertTrue(called['rescued'])
self.compute.unrescue_instance(self.context, instance_uuid)
@@ -290,7 +292,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.power_on_instance(self.context, instance_uuid)
self.assertTrue(called['power_on'])
self.compute.terminate_instance(self.context, instance_uuid)
@@ -309,7 +311,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.power_off_instance(self.context, instance_uuid)
self.assertTrue(called['power_off'])
self.compute.terminate_instance(self.context, instance_uuid)
@@ -319,7 +321,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.pause_instance(self.context, instance_uuid)
self.compute.unpause_instance(self.context, instance_uuid)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -329,7 +331,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.suspend_instance(self.context, instance_uuid)
self.compute.resume_instance(self.context, instance_uuid)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -339,7 +341,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.rebuild_instance(self.context, instance_uuid)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -349,7 +351,7 @@ class ComputeTestCase(BaseTestCase):
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
db.instance_update(self.context, instance_id,
{'task_state': task_states.REBOOTING})
@@ -368,7 +370,7 @@ class ComputeTestCase(BaseTestCase):
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
db.instance_update(self.context, instance_id,
{'task_state': task_states.REBOOTING_HARD})
@@ -386,7 +388,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
db.instance_update(self.context, instance_id,
{'task_state': task_states.UPDATING_PASSWORD})
@@ -415,7 +417,7 @@ class ComputeTestCase(BaseTestCase):
fake_driver_inject_file)
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
self.compute.inject_file(self.context, instance['uuid'], "/tmp/test",
"File Contents")
self.assertTrue(called['inject'])
@@ -434,7 +436,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.inject_network_info(self.context, instance_uuid)
self.assertTrue(called['inject'])
self.compute.terminate_instance(self.context, instance_uuid)
@@ -452,7 +454,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.reset_network(self.context, instance_uuid)
self.assertTrue(called['reset'])
self.compute.terminate_instance(self.context, instance_uuid)
@@ -470,7 +472,7 @@ class ComputeTestCase(BaseTestCase):
fake_driver_agent_update)
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
self.compute.agent_update(self.context, instance['uuid'],
'http://fake/url/', 'fakehash')
self.assertTrue(called['agent_update'])
@@ -482,14 +484,14 @@ class ComputeTestCase(BaseTestCase):
instance_id = instance['id']
instance_uuid = instance['uuid']
name = "myfakesnapshot"
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.compute.snapshot_instance(self.context, instance_uuid, name)
self.compute.terminate_instance(self.context, instance_uuid)
def test_console_output(self):
"""Make sure we can get console output from instance"""
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
console = self.compute.get_console_output(self.context,
instance['uuid'])
@@ -499,7 +501,7 @@ class ComputeTestCase(BaseTestCase):
def test_ajax_console(self):
"""Make sure we can get console output from instance"""
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
console = self.compute.get_ajax_console(self.context,
instance['uuid'])
@@ -509,7 +511,7 @@ class ComputeTestCase(BaseTestCase):
def test_vnc_console(self):
"""Make sure we can a vnc console for an instance."""
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
console = self.compute.get_vnc_console(self.context, instance['uuid'])
self.assert_(console)
@@ -518,7 +520,7 @@ class ComputeTestCase(BaseTestCase):
def test_diagnostics(self):
"""Make sure we can get diagnostics for an instance."""
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
diagnostics = self.compute.get_diagnostics(self.context,
instance['uuid'])
@@ -569,8 +571,9 @@ class ComputeTestCase(BaseTestCase):
def test_run_instance_usage_notification(self):
"""Ensure run instance generates apropriate usage notification"""
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
+ inst_ref = self._create_fake_instance()
+ instance_id = inst_ref['id']
+ self.compute.run_instance(self.context, inst_ref['uuid'])
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
inst_ref = db.instance_get(self.context, instance_id)
msg = test_notifier.NOTIFICATIONS[0]
@@ -596,7 +599,7 @@ class ComputeTestCase(BaseTestCase):
"""Ensure terminate_instance generates apropriate usage notification"""
instance_id = self._create_instance()
inst_ref = db.instance_get(self.context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, inst_ref['uuid'])
test_notifier.NOTIFICATIONS = []
self.compute.terminate_instance(self.context, inst_ref['uuid'])
@@ -624,17 +627,18 @@ class ComputeTestCase(BaseTestCase):
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance = self._create_fake_instance()
- instance_id = instance['id']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
self.assertRaises(exception.Error,
self.compute.run_instance,
self.context,
- instance_id)
+ instance['uuid'])
self.compute.terminate_instance(self.context, instance['uuid'])
def test_instance_set_to_error_on_uncaught_exception(self):
"""Test that instance is set to error state when exception is raised"""
- instance_id = self._create_instance()
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ instance_uuid = instance['uuid']
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
@@ -651,7 +655,7 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(quantum_client.QuantumServerException,
self.compute.run_instance,
self.context,
- instance_id)
+ instance_uuid)
instance = db.instance_get(context.get_admin_context(), instance_id)
self.assertEqual(vm_states.ERROR, instance['vm_state'])
@@ -660,8 +664,7 @@ class ComputeTestCase(BaseTestCase):
def test_network_is_deallocated_on_spawn_failure(self):
"""When a spawn fails the network must be deallocated"""
- instance_id = self._create_instance()
- instance = db.instance_get(self.context, instance_id)
+ instance = self._create_fake_instance()
self.mox.StubOutWithMock(self.compute, "_setup_block_device_mapping")
self.compute._setup_block_device_mapping(mox.IgnoreArg(),
@@ -673,7 +676,7 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(rpc.common.RemoteError,
self.compute.run_instance,
self.context,
- instance_id)
+ instance['uuid'])
self.compute.terminate_instance(self.context, instance['uuid'])
@@ -682,7 +685,7 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
non_admin_context = context.RequestContext(None, None, False, False)
@@ -730,7 +733,7 @@ class ComputeTestCase(BaseTestCase):
context = self.context.elevated()
inst_ref = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, inst_ref['uuid'])
test_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_id, {'host': 'foo'})
@@ -763,7 +766,7 @@ class ComputeTestCase(BaseTestCase):
context = self.context.elevated()
inst_ref = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, inst_ref['uuid'])
db.instance_update(self.context, inst_ref['uuid'],
{'host': 'foo'})
self.compute.prep_resize(context, inst_ref['uuid'], 1)
@@ -776,7 +779,9 @@ class ComputeTestCase(BaseTestCase):
def test_finish_revert_resize(self):
"""Ensure that the flavor is reverted to the original on revert"""
context = self.context.elevated()
- instance_id = self._create_instance()
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ instance_uuid = instance['uuid']
def fake(*args, **kwargs):
pass
@@ -785,7 +790,7 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(self.compute.driver, 'finish_revert_migration', fake)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
# Confirm the instance size before the resize starts
inst_ref = db.instance_get(context, instance_id)
@@ -833,12 +838,12 @@ class ComputeTestCase(BaseTestCase):
def test_resize_same_source_fails(self):
"""Ensure instance fails to migrate when source and destination are
the same host"""
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
- inst_ref = db.instance_get(self.context, instance_id)
+ instance = self._create_fake_instance()
+ self.compute.run_instance(self.context, instance['uuid'])
+ instance = db.instance_get(self.context, instance['id'])
self.assertRaises(exception.Error, self.compute.prep_resize,
- self.context, inst_ref['uuid'], 1)
- self.compute.terminate_instance(self.context, inst_ref['uuid'])
+ self.context, instance['uuid'], 1)
+ self.compute.terminate_instance(self.context, instance['uuid'])
def test_resize_instance_handles_migration_error(self):
"""Ensure vm_state is ERROR when MigrationError occurs"""
@@ -852,7 +857,7 @@ class ComputeTestCase(BaseTestCase):
context = self.context.elevated()
inst_ref = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, inst_ref['uuid'])
db.instance_update(self.context, inst_ref['uuid'], {'host': 'foo'})
self.compute.prep_resize(context, inst_ref['uuid'], 1)
migration_ref = db.migration_get_by_instance_and_status(context,
@@ -1028,9 +1033,9 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
- instance_id = self._create_instance()
+ instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("Running instances: %s"), instances)
@@ -1225,10 +1230,10 @@ class ComputeAPITestCase(BaseTestCase):
def test_start(self):
instance = self._create_fake_instance()
instance_id = instance['id']
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
- self.compute.run_instance(self.context, instance_id)
-
- self.compute.stop_instance(self.context, instance['uuid'])
+ self.compute.stop_instance(self.context, instance_uuid)
instance = db.instance_get(self.context, instance_id)
self.assertEqual(instance['task_state'], None)
@@ -1241,8 +1246,10 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance_id)
def test_stop(self):
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
instance = db.instance_get(self.context, instance_id)
self.assertEqual(instance['task_state'], None)
@@ -1255,8 +1262,10 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance_id)
def test_delete(self):
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
instance = db.instance_get(self.context, instance_id)
self.assertEqual(instance['task_state'], None)
@@ -1269,8 +1278,9 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance_id)
def test_delete_soft(self):
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ self.compute.run_instance(self.context, instance['uuid'])
instance = db.instance_get(self.context, instance_id)
self.assertEqual(instance['task_state'], None)
@@ -1284,8 +1294,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_force_delete(self):
"""Ensure instance can be deleted after a soft delete"""
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ self.compute.run_instance(self.context, instance['uuid'])
instance = db.instance_get(self.context, instance_id)
self.compute_api.soft_delete(self.context, instance)
@@ -1303,7 +1314,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
@@ -1318,7 +1329,7 @@ class ComputeAPITestCase(BaseTestCase):
"""Ensure instance can be resumed"""
instance = self._create_fake_instance()
instance_id = instance['id']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
self.assertEqual(instance['task_state'], None)
@@ -1333,7 +1344,7 @@ class ComputeAPITestCase(BaseTestCase):
"""Ensure instance can be paused"""
instance = self._create_fake_instance()
instance_id = instance['id']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
self.assertEqual(instance['task_state'], None)
@@ -1349,7 +1360,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
self.assertEqual(instance['task_state'], None)
@@ -1364,8 +1375,10 @@ class ComputeAPITestCase(BaseTestCase):
def test_restore(self):
"""Ensure instance can be restored from a soft delete"""
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
instance = db.instance_get(self.context, instance_id)
self.compute_api.soft_delete(self.context, instance)
@@ -1383,7 +1396,8 @@ class ComputeAPITestCase(BaseTestCase):
def test_rebuild(self):
inst_ref = self._create_fake_instance()
instance_id = inst_ref['id']
- self.compute.run_instance(self.context, instance_id)
+ instance_uuid = inst_ref['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
instance = db.instance_get(self.context, instance_id)
self.assertEqual(instance['task_state'], None)
@@ -1401,7 +1415,8 @@ class ComputeAPITestCase(BaseTestCase):
"""Ensure instance can be soft rebooted"""
inst_ref = self._create_fake_instance()
instance_id = inst_ref['id']
- self.compute.run_instance(self.context, instance_id)
+ instance_uuid = inst_ref['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
inst_ref = db.instance_get(self.context, instance_id)
self.assertEqual(inst_ref['task_state'], None)
@@ -1418,7 +1433,8 @@ class ComputeAPITestCase(BaseTestCase):
"""Ensure instance can be hard rebooted"""
inst_ref = self._create_fake_instance()
instance_id = inst_ref['id']
- self.compute.run_instance(self.context, instance_id)
+ instance_uuid = inst_ref['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
inst_ref = db.instance_get(self.context, instance_id)
self.assertEqual(inst_ref['task_state'], None)
@@ -1443,8 +1459,10 @@ class ComputeAPITestCase(BaseTestCase):
def test_set_admin_password(self):
"""Ensure instance can have its admin password set"""
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
+ instance = self._create_fake_instance()
+ instance_id = instance['id']
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance_uuid)
inst_ref = db.instance_get(self.context, instance_id)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
@@ -1462,7 +1480,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_id = instance['id']
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance_uuid)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
@@ -1536,7 +1554,7 @@ class ComputeAPITestCase(BaseTestCase):
instance_id = self._create_instance()
context = self.context.elevated()
instance = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
self.compute_api.resize(context, instance, '4')
# create a fake migration record (manager does this)
@@ -1552,7 +1570,7 @@ class ComputeAPITestCase(BaseTestCase):
instance_id = self._create_instance()
context = self.context.elevated()
instance = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
self.compute_api.resize(context, instance, '4')
@@ -1569,7 +1587,7 @@ class ComputeAPITestCase(BaseTestCase):
instance_id = self._create_instance()
context = self.context.elevated()
instance = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
self.assertRaises(exception.NotFound, self.compute_api.resize,
context, instance, 200)
@@ -1578,10 +1596,11 @@ class ComputeAPITestCase(BaseTestCase):
def test_resize_down_fails(self):
"""Ensure resizing down raises and fails"""
- context = self.context.elevated()
instance_id = self._create_instance()
+ context = self.context.elevated()
+ instance = db.instance_get(context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
- self.compute.run_instance(self.context, instance_id)
inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
db.instance_update(self.context, instance_id,
{'instance_type_id': inst_type['id']})
@@ -1598,7 +1617,7 @@ class ComputeAPITestCase(BaseTestCase):
instance_id = self._create_instance()
instance = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
self.assertRaises(exception.CannotResizeToSameSize,
self.compute_api.resize, context, instance, 1)
@@ -1609,7 +1628,7 @@ class ComputeAPITestCase(BaseTestCase):
context = self.context.elevated()
instance_id = self._create_instance()
instance = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
# Migrate simply calls resize() without a flavor_id.
self.compute_api.resize(context, instance, None)
self.compute.terminate_instance(context, instance['uuid'])
@@ -1626,7 +1645,7 @@ class ComputeAPITestCase(BaseTestCase):
context = self.context.elevated()
instance_id = self._create_instance(dict(host='host2'))
instance = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
try:
self.compute_api.resize(context, instance, None)
finally:
@@ -1645,7 +1664,7 @@ class ComputeAPITestCase(BaseTestCase):
context = self.context.elevated()
instance_id = self._create_instance(dict(host='host2'))
instance = db.instance_get(context, instance_id)
- self.compute.run_instance(self.context, instance_id)
+ self.compute.run_instance(self.context, instance['uuid'])
try:
self.compute_api.resize(context, instance, None)
finally:
@@ -2303,14 +2322,14 @@ class ComputeAPITestCase(BaseTestCase):
def test_inject_network_info(self):
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
instance = self.compute_api.get(self.context, instance['uuid'])
self.compute_api.inject_network_info(self.context, instance)
self.compute_api.delete(self.context, instance)
def test_reset_network(self):
instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['id'])
+ self.compute.run_instance(self.context, instance['uuid'])
instance = self.compute_api.get(self.context, instance['uuid'])
self.compute_api.reset_network(self.context, instance)
@@ -2331,9 +2350,9 @@ class ComputeAPITestCase(BaseTestCase):
self.assertTrue(self.compute_api.get_lock(self.context, instance))
def test_add_remove_security_group(self):
- instance_id = self._create_instance()
- self.compute.run_instance(self.context, instance_id)
- instance = self.compute_api.get(self.context, instance_id)
+ instance = self._create_fake_instance()
+ self.compute.run_instance(self.context, instance['uuid'])
+ instance = self.compute_api.get(self.context, instance['id'])
security_group_name = self._create_group()['name']
self.compute_api.add_security_group(self.context,
instance,