summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2012-07-26 03:10:26 +0000
committerGerrit Code Review <review@openstack.org>2012-07-26 03:10:26 +0000
commit965a5a0e059e8a6879cf7a6b167ad112bea159b9 (patch)
tree6d44cbeede51f6738dea4ed7eb1e6185587c3f76
parentd19c6db90a625333874eb534b49e9894e57453a8 (diff)
parent8dac75fa0b6bb16adfdca2198703031249722f3d (diff)
downloadnova-965a5a0e059e8a6879cf7a6b167ad112bea159b9.tar.gz
nova-965a5a0e059e8a6879cf7a6b167ad112bea159b9.tar.xz
nova-965a5a0e059e8a6879cf7a6b167ad112bea159b9.zip
Merge changes I85ad29e8,I21d91e6c,I114081fc,If881fb41,Ia7e31428,I15ceb762,Id2c455f6,I45914ed5
* changes: Send a full instance via rpc for attach_volume. Send a full instance via rpc for add_fixed_ip_to_instance. Send a full instance via rpc for get_console_output. Send a full instance via rpc for suspend_instance. Send a full instance via rpc for (un)pause_instance. Don't use rpc to lock/unlock an instance. Convert reboot_instance to take a full instance. Include name in a primitive Instance.
-rw-r--r--nova/compute/api.py19
-rw-r--r--nova/compute/manager.py117
-rw-r--r--nova/compute/rpcapi.py62
-rw-r--r--nova/db/sqlalchemy/models.py20
-rw-r--r--nova/tests/compute/test_compute.py48
-rw-r--r--nova/tests/compute/test_rpcapi.py48
6 files changed, 193 insertions, 121 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 7cdb8d167..0571c7313 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -130,6 +130,15 @@ class API(base.Base):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(API, self).__init__(**kwargs)
+ def _instance_update(self, context, instance_uuid, **kwargs):
+ """Update an instance in the database using kwargs as value."""
+
+ (old_ref, instance_ref) = self.db.instance_update_and_get_original(
+ context, instance_uuid, kwargs)
+ notifications.send_update(context, old_ref, instance_ref)
+
+ return instance_ref
+
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
@@ -1531,12 +1540,18 @@ class API(base.Base):
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
- self.compute_rpcapi.lock_instance(context, instance=instance)
+ context = context.elevated()
+ instance_uuid = instance['uuid']
+ LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid)
+ self._instance_update(context, instance_uuid, locked=True)
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
- self.compute_rpcapi.unlock_instance(context, instance=instance)
+ context = context.elevated()
+ instance_uuid = instance['uuid']
+ LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid)
+ self._instance_update(context, instance_uuid, locked=False)
@wrap_check_policy
def get_lock(self, context, instance):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 3e66bd20f..afe8cae51 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -297,7 +297,7 @@ def _get_additional_capabilities():
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '1.3'
+ RPC_API_VERSION = '1.9'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -1090,18 +1090,19 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def reboot_instance(self, context, instance_uuid, reboot_type="SOFT"):
+ def reboot_instance(self, context, instance=None, instance_uuid=None,
+ reboot_type="SOFT"):
"""Reboot an instance on this host."""
LOG.audit(_("Rebooting instance"), context=context,
instance_uuid=instance_uuid)
context = context.elevated()
- instance = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance_uuid,
+ self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE)
@@ -1125,8 +1126,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance_uuid,
+ self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
@@ -1666,26 +1666,28 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def add_fixed_ip_to_instance(self, context, instance_uuid, network_id):
+ def add_fixed_ip_to_instance(self, context, network_id, instance=None,
+ instance_uuid=None):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(
- context, instance_ref, "create_ip.start")
+ context, instance, "create_ip.start")
- instance_id = instance_ref['id']
+ instance_id = instance['id']
self.network_api.add_fixed_ip_to_instance(context,
- instance_ref,
+ instance,
network_id)
network_info = self.inject_network_info(context,
- instance_ref['uuid'])
- self.reset_network(context, instance_ref['uuid'])
+ instance['uuid'])
+ self.reset_network(context, instance['uuid'])
self._notify_about_instance_usage(
- context, instance_ref, "create_ip.end", network_info=network_info)
+ context, instance, "create_ip.end", network_info=network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -1713,17 +1715,18 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def pause_instance(self, context, instance_uuid):
+ def pause_instance(self, context, instance=None, instance_uuid=None):
"""Pause an instance on this host."""
context = context.elevated()
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
- LOG.audit(_('Pausing'), context=context, instance=instance_ref)
- self.driver.pause(instance_ref)
+ LOG.audit(_('Pausing'), context=context, instance=instance)
+ self.driver.pause(instance)
- current_power_state = self._get_power_state(context, instance_ref)
+ current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
- instance_ref['uuid'],
+ instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.PAUSED,
task_state=None)
@@ -1731,17 +1734,18 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def unpause_instance(self, context, instance_uuid):
+ def unpause_instance(self, context, instance=None, instance_uuid=None):
"""Unpause a paused instance on this host."""
context = context.elevated()
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
- LOG.audit(_('Unpausing'), context=context, instance=instance_ref)
- self.driver.unpause(instance_ref)
+ LOG.audit(_('Unpausing'), context=context, instance=instance)
+ self.driver.unpause(instance)
- current_power_state = self._get_power_state(context, instance_ref)
+ current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
- instance_ref['uuid'],
+ instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
@@ -1781,22 +1785,23 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def suspend_instance(self, context, instance_uuid):
+ def suspend_instance(self, context, instance=None, instance_uuid=None):
"""Suspend the given instance."""
context = context.elevated()
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
- LOG.audit(_('Suspending'), context=context, instance=instance_ref)
- self.driver.suspend(instance_ref)
+ LOG.audit(_('Suspending'), context=context, instance=instance)
+ self.driver.suspend(instance)
- current_power_state = self._get_power_state(context, instance_ref)
+ current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
- instance_ref['uuid'],
+ instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.SUSPENDED,
task_state=None)
- self._notify_about_instance_usage(context, instance_ref, 'suspend')
+ self._notify_about_instance_usage(context, instance, 'suspend')
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -1821,7 +1826,12 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def lock_instance(self, context, instance_uuid):
- """Lock the given instance."""
+ """Lock the given instance.
+
+ This isn't actually used in the current code. The same thing is now
+ done directly in nova.compute.api. This must stay here for backwards
+ compatibility of the rpc API.
+ """
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid)
@@ -1830,7 +1840,12 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def unlock_instance(self, context, instance_uuid):
- """Unlock the given instance."""
+ """Unlock the given instance.
+
+ This isn't actually used in the current code. The same thing is now
+ done directly in nova.compute.api. This must stay here for backwards
+ compatibility of the rpc API.
+ """
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid)
@@ -1872,14 +1887,16 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
- def get_console_output(self, context, instance_uuid, tail_length=None):
+ def get_console_output(self, context, instance=None, instance_uuid=None,
+ tail_length=None):
"""Send the console output for the given instance."""
context = context.elevated()
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_("Get console output"), context=context,
- instance=instance_ref)
- output = self.driver.get_console_output(instance_ref)
+ instance=instance)
+ output = self.driver.get_console_output(instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
@@ -1944,15 +1961,17 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def attach_volume(self, context, instance_uuid, volume_id, mountpoint):
+ def attach_volume(self, context, volume_id, mountpoint, instance_uuid=None,
+ instance=None):
"""Attach a volume to an instance."""
volume = self.volume_api.get(context, volume_id)
context = context.elevated()
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
- locals(), context=context, instance=instance_ref)
+ locals(), context=context, instance=instance)
try:
- connector = self.driver.get_volume_connector(instance_ref)
+ connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
volume,
connector)
@@ -1961,28 +1980,28 @@ class ComputeManager(manager.SchedulerDependentManager):
msg = _("Failed to connect to volume %(volume_id)s "
"while attaching at %(mountpoint)s")
LOG.exception(msg % locals(), context=context,
- instance=instance_ref)
+ instance=instance)
self.volume_api.unreserve_volume(context, volume)
try:
self.driver.attach_volume(connection_info,
- instance_ref['name'],
+ instance['name'],
mountpoint)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
msg = _("Failed to attach volume %(volume_id)s "
"at %(mountpoint)s")
LOG.exception(msg % locals(), context=context,
- instance=instance_ref)
+ instance=instance)
self.volume_api.terminate_connection(context,
volume,
connector)
self.volume_api.attach(context,
volume,
- instance_ref['uuid'],
+ instance['uuid'],
mountpoint)
values = {
- 'instance_uuid': instance_ref['uuid'],
+ 'instance_uuid': instance['uuid'],
'connection_info': jsonutils.dumps(connection_info),
'device_name': mountpoint,
'delete_on_termination': False,
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index e637c9f64..b11b17144 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -20,6 +20,7 @@ Client side of the compute RPC API.
from nova import exception
from nova import flags
+from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
import nova.openstack.common.rpc.proxy
@@ -58,6 +59,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.1 - Adds get_host_uptime()
1.2 - Adds check_can_live_migrate_[destination|source]
1.3 - Adds change_instance_metadata()
+ 1.4 - Remove instance_uuid, add instance argument to reboot_instance()
+ 1.5 - Remove instance_uuid, add instance argument to pause_instance(),
+ unpause_instance()
+ 1.6 - Remove instance_uuid, add instance argument to suspend_instance()
+ 1.7 - Remove instance_uuid, add instance argument to
+ get_console_output()
+ 1.8 - Remove instance_uuid, add instance argument to
+ add_fixed_ip_to_instance()
+ 1.9 - Remove instance_uuid, add instance argument to attach_volume()
'''
BASE_RPC_API_VERSION = '1.0'
@@ -81,15 +91,19 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, host, None))
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('add_fixed_ip_to_instance',
- instance_uuid=instance['uuid'], network_id=network_id),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p, network_id=network_id),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.8')
def attach_volume(self, ctxt, instance, volume_id, mountpoint):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('attach_volume',
- instance_uuid=instance['uuid'], volume_id=volume_id,
+ instance=instance_p, volume_id=volume_id,
mountpoint=mountpoint),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.9')
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
@@ -144,9 +158,11 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, host, None))
def get_console_output(self, ctxt, instance, tail_length):
+ instance_p = jsonutils.to_primitive(instance)
return self.call(ctxt, self.make_msg('get_console_output',
- instance_uuid=instance['uuid'], tail_length=tail_length),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p, tail_length=tail_length),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.7')
def get_console_pool_info(self, ctxt, console_type, host):
return self.call(ctxt, self.make_msg('get_console_pool_info',
@@ -201,11 +217,6 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance_uuid=instance['uuid']),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def lock_instance(self, ctxt, instance):
- self.cast(ctxt, self.make_msg('lock_instance',
- instance_uuid=instance['uuid']),
- topic=_compute_topic(self.topic, ctxt, None, instance))
-
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
return self.call(ctxt,
@@ -214,9 +225,11 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
_compute_topic(self.topic, ctxt, host, None))
def pause_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('pause_instance',
- instance_uuid=instance['uuid']),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.5')
def power_off_instance(self, ctxt, instance):
self.cast(ctxt, self.make_msg('power_off_instance',
@@ -235,9 +248,11 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
disk=disk), _compute_topic(self.topic, ctxt, host, None))
def reboot_instance(self, ctxt, instance, reboot_type):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('reboot_instance',
- instance_uuid=instance['uuid'], reboot_type=reboot_type),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p, reboot_type=reboot_type),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.4')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref):
@@ -352,24 +367,23 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, None, instance))
def suspend_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('suspend_instance',
- instance_uuid=instance['uuid']),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.6')
def terminate_instance(self, ctxt, instance):
self.cast(ctxt, self.make_msg('terminate_instance',
instance_uuid=instance['uuid']),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def unlock_instance(self, ctxt, instance):
- self.cast(ctxt, self.make_msg('unlock_instance',
- instance_uuid=instance['uuid']),
- topic=_compute_topic(self.topic, ctxt, None, instance))
-
def unpause_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('unpause_instance',
- instance_uuid=instance['uuid']),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.5')
def unrescue_instance(self, ctxt, instance):
self.cast(ctxt, self.make_msg('unrescue_instance',
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 3ad6e71dd..5f7e85511 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -76,11 +76,17 @@ class NovaBase(object):
return getattr(self, key, default)
def __iter__(self):
- self._i = iter(object_mapper(self).columns)
+ columns = dict(object_mapper(self).columns).keys()
+ # NOTE(russellb): Allow models to specify other keys that can be looked
+ # up, beyond the actual db columns. An example would be the 'name'
+ # property for an Instance.
+ if hasattr(self, '_extra_keys'):
+ columns.extend(self._extra_keys())
+ self._i = iter(columns)
return self
def next(self):
- n = self._i.next().name
+ n = self._i.next()
return n, getattr(self, n)
def update(self, values):
@@ -183,12 +189,15 @@ class Instance(BASE, NovaBase):
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
- for key, value in self.iteritems():
+ # NOTE(russellb): Don't use self.iteritems() here, as it will
+ # result in infinite recursion on the name property.
+ for column in iter(object_mapper(self).columns):
+ key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
- info[key] = value
+ info[key] = self[key]
try:
base_name = FLAGS.instance_name_template % info
except KeyError:
@@ -197,6 +206,9 @@ class Instance(BASE, NovaBase):
base_name += "-rescue"
return base_name
+ def _extra_keys(self):
+ return ['name']
+
user_id = Column(String(255))
project_id = Column(String(255))
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index f6bc0ee4f..c0356dda6 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -40,6 +40,7 @@ from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import test_notifier
from nova.openstack.common import policy as common_policy
@@ -212,6 +213,7 @@ class ComputeTestCase(BaseTestCase):
fake_get_nw_info)
self.stubs.Set(nova.network.API, 'allocate_for_instance',
fake_get_nw_info)
+ self.compute_api = compute.API()
def tearDown(self):
super(ComputeTestCase, self).tearDown()
@@ -490,8 +492,10 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance_uuid)
- self.compute.pause_instance(self.context, instance_uuid)
- self.compute.unpause_instance(self.context, instance_uuid)
+ self.compute.pause_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
+ self.compute.unpause_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
self.compute.terminate_instance(self.context, instance_uuid)
def test_suspend(self):
@@ -499,7 +503,8 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance_uuid)
- self.compute.suspend_instance(self.context, instance_uuid)
+ self.compute.suspend_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
self.compute.resume_instance(self.context, instance_uuid)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -539,8 +544,9 @@ class ComputeTestCase(BaseTestCase):
{'task_state': task_states.REBOOTING})
reboot_type = "SOFT"
- self.compute.reboot_instance(self.context, instance['uuid'],
- reboot_type)
+ self.compute.reboot_instance(self.context,
+ instance=jsonutils.to_primitive(instance),
+ reboot_type=reboot_type)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
@@ -556,8 +562,9 @@ class ComputeTestCase(BaseTestCase):
{'task_state': task_states.REBOOTING_HARD})
reboot_type = "HARD"
- self.compute.reboot_instance(self.context, instance['uuid'],
- reboot_type)
+ self.compute.reboot_instance(self.context,
+ instance=jsonutils.to_primitive(instance),
+ reboot_type=reboot_type)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
@@ -762,7 +769,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance['uuid'])
output = self.compute.get_console_output(self.context,
- instance['uuid'])
+ instance=jsonutils.to_primitive(instance))
self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance['uuid'])
@@ -772,8 +779,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance['uuid'])
output = self.compute.get_console_output(self.context,
- instance['uuid'],
- tail_length=2)
+ instance=jsonutils.to_primitive(instance), tail_length=2)
self.assertEqual(output, 'ANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance['uuid'])
@@ -844,11 +850,12 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.compute.manager.ComputeManager,
'reset_network', dummy)
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
- self.compute.add_fixed_ip_to_instance(self.context, instance_uuid, 1)
+ self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
+ instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -1018,7 +1025,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance['uuid'])
def test_get_lock(self):
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertFalse(self.compute._get_lock(self.context,
instance['uuid']))
db.instance_update(self.context, instance['uuid'], {'locked': True})
@@ -1035,15 +1042,15 @@ class ComputeTestCase(BaseTestCase):
is_admin=False)
# decorator should return False (fail) with locked nonadmin context
- self.compute.lock_instance(self.context, instance_uuid)
+ self.compute_api.lock(self.context, instance)
ret_val = self.compute.reboot_instance(non_admin_context,
- instance_uuid)
+ instance=jsonutils.to_primitive(instance))
self.assertEqual(ret_val, False)
# decorator should return None (success) with unlocked nonadmin context
- self.compute.unlock_instance(self.context, instance_uuid)
+ self.compute_api.unlock(self.context, instance)
ret_val = self.compute.reboot_instance(non_admin_context,
- instance_uuid)
+ instance=jsonutils.to_primitive(instance))
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_uuid)
@@ -2540,7 +2547,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['task_state'], None)
- self.compute.pause_instance(self.context, instance_uuid)
+ self.compute.pause_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
# set the state that the instance gets when pause finishes
instance = db.instance_update(self.context, instance['uuid'],
{'vm_state': vm_states.PAUSED})
@@ -3670,9 +3678,9 @@ class ComputeAPITestCase(BaseTestCase):
self.mox.StubOutWithMock(rpc, 'call')
rpc_msg = {'method': 'get_console_output',
- 'args': {'instance_uuid': fake_instance['uuid'],
+ 'args': {'instance': fake_instance,
'tail_length': fake_tail_length},
- 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
+ 'version': '1.7'}
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg, None).AndReturn(fake_console_output)
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index d1d633023..ee74f2c6b 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -20,7 +20,9 @@ Unit Tests for nova.compute.rpcapi
from nova.compute import rpcapi as compute_rpcapi
from nova import context
+from nova import db
from nova import flags
+from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
@@ -31,19 +33,27 @@ FLAGS = flags.FLAGS
class ComputeRpcAPITestCase(test.TestCase):
def setUp(self):
- self.fake_instance = {
- 'uuid': 'fake_uuid',
- 'host': 'fake_host',
- 'name': 'fake_name',
- 'id': 'fake_id',
- }
+ self.context = context.get_admin_context()
+ inst = db.instance_create(self.context, {'host': 'fake_host',
+ 'instance_type_id': 1})
+ self.fake_instance = jsonutils.to_primitive(inst)
super(ComputeRpcAPITestCase, self).setUp()
def tearDown(self):
super(ComputeRpcAPITestCase, self).tearDown()
+ def test_serialized_instance_has_name(self):
+ self.assertTrue('name' in self.fake_instance)
+
def _test_compute_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ methods_with_instance = [
+ 'add_fixed_ip_to_instance', 'attach_volume', 'get_console_output',
+ 'pause_instance', 'reboot_instance', 'suspend_instance',
+ 'unpause_instance'
+ ]
+
if 'rpcapi_class' in kwargs:
rpcapi_class = kwargs['rpcapi_class']
del kwargs['rpcapi_class']
@@ -62,7 +72,8 @@ class ComputeRpcAPITestCase(test.TestCase):
del expected_msg['args']['host']
if 'destination' in expected_msg['args']:
del expected_msg['args']['destination']
- if 'instance' in expected_msg['args']:
+ if 'instance' in expected_msg['args'] and (method not in
+ methods_with_instance):
instance = expected_msg['args']['instance']
del expected_msg['args']['instance']
if method in ['rollback_live_migration_at_destination',
@@ -112,11 +123,12 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
- instance=self.fake_instance, network_id='id')
+ instance=self.fake_instance, network_id='id', version='1.8')
def test_attach_volume(self):
self._test_compute_api('attach_volume', 'cast',
- instance=self.fake_instance, volume_id='id', mountpoint='mp')
+ instance=self.fake_instance, volume_id='id', mountpoint='mp',
+ version='1.9')
def test_check_can_live_migrate_destination(self):
self._test_compute_api('check_can_live_migrate_destination', 'call',
@@ -151,7 +163,7 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
- instance=self.fake_instance, tail_length='tl')
+ instance=self.fake_instance, tail_length='tl', version='1.7')
def test_get_console_pool_info(self):
self._test_compute_api('get_console_pool_info', 'call',
@@ -188,10 +200,6 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('inject_network_info', 'cast',
instance=self.fake_instance)
- def test_lock_instance(self):
- self._test_compute_api('lock_instance', 'cast',
- instance=self.fake_instance)
-
def test_post_live_migration_at_destination(self):
self._test_compute_api('post_live_migration_at_destination', 'call',
instance=self.fake_instance, block_migration='block_migration',
@@ -199,7 +207,7 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_pause_instance(self):
self._test_compute_api('pause_instance', 'cast',
- instance=self.fake_instance)
+ instance=self.fake_instance, version='1.5')
def test_power_off_instance(self):
self._test_compute_api('power_off_instance', 'cast',
@@ -216,7 +224,7 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_reboot_instance(self):
self._test_compute_api('reboot_instance', 'cast',
- instance=self.fake_instance, reboot_type='type')
+ instance=self.fake_instance, reboot_type='type', version='1.4')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast',
@@ -305,19 +313,15 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_suspend_instance(self):
self._test_compute_api('suspend_instance', 'cast',
- instance=self.fake_instance)
+ instance=self.fake_instance, version='1.6')
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance)
- def test_unlock_instance(self):
- self._test_compute_api('unlock_instance', 'cast',
- instance=self.fake_instance)
-
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
- instance=self.fake_instance)
+ instance=self.fake_instance, version='1.5')
def test_unrescue_instance(self):
self._test_compute_api('unrescue_instance', 'cast',