-rwxr-xr-x  bin/nova-ajax-console-proxy | 12
-rwxr-xr-x  contrib/nova.sh | 2
-rw-r--r--  nova/api/ec2/cloud.py | 50
-rw-r--r--  nova/api/openstack/common.py | 78
-rw-r--r--  nova/api/openstack/contrib/floating_ips.py | 9
-rw-r--r--  nova/api/openstack/servers.py | 22
-rw-r--r--  nova/api/openstack/views/servers.py | 13
-rw-r--r--  nova/compute/api.py | 118
-rw-r--r--  nova/compute/manager.py | 453
-rw-r--r--  nova/compute/task_states.py | 59
-rw-r--r--  nova/compute/vm_states.py | 39
-rw-r--r--  nova/context.py | 2
-rw-r--r--  nova/db/sqlalchemy/api.py | 20
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py | 138
-rw-r--r--  nova/db/sqlalchemy/models.py | 16
-rw-r--r--  nova/exception.py | 6
-rw-r--r--  nova/flags.py | 8
-rw-r--r--  nova/network/api.py | 6
-rw-r--r--  nova/network/manager.py | 7
-rw-r--r--  nova/notifier/api.py | 3
-rw-r--r--  nova/rpc/__init__.py | 39
-rw-r--r--  nova/rpc/common.py | 6
-rw-r--r--  nova/rpc/impl_carrot.py (renamed from nova/rpc/amqp.py) | 102
-rw-r--r--  nova/rpc/impl_kombu.py | 781
-rw-r--r--  nova/scheduler/driver.py | 10
-rw-r--r--  nova/service.py | 28
-rw-r--r--  nova/tests/api/openstack/contrib/test_floating_ips.py | 118
-rw-r--r--  nova/tests/api/openstack/test_server_actions.py | 34
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 101
-rw-r--r--  nova/tests/integrated/test_servers.py | 34
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 13
-rw-r--r--  nova/tests/test_adminapi.py | 2
-rw-r--r--  nova/tests/test_cloud.py | 17
-rw-r--r--  nova/tests/test_compute.py | 38
-rw-r--r--  nova/tests/test_context.py | 33
-rw-r--r--  nova/tests/test_network.py | 16
-rw-r--r--  nova/tests/test_rpc.py | 162
-rw-r--r--  nova/tests/test_rpc_amqp.py | 88
-rw-r--r--  nova/tests/test_rpc_carrot.py | 45
-rw-r--r--  nova/tests/test_rpc_common.py | 189
-rw-r--r--  nova/tests/test_rpc_kombu.py | 110
-rw-r--r--  nova/tests/test_test.py | 5
-rw-r--r--  nova/tests/test_xenapi.py | 37
-rw-r--r--  nova/tests/vmwareapi/db_fakes.py | 5
-rw-r--r--  tools/pip-requires | 1
45 files changed, 2273 insertions, 802 deletions
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index 0a789b4b9..23fb42fb5 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -113,11 +113,10 @@ class AjaxConsoleProxy(object):
AjaxConsoleProxy.tokens[kwargs['token']] = \
{'args': kwargs, 'last_activity': time.time()}
- conn = rpc.create_connection(new=True)
- consumer = rpc.create_consumer(
- conn,
- FLAGS.ajax_console_proxy_topic,
- TopicProxy)
+ self.conn = rpc.create_connection(new=True)
+ self.conn.create_consumer(
+ FLAGS.ajax_console_proxy_topic,
+ TopicProxy)
def delete_expired_tokens():
now = time.time()
@@ -129,7 +128,7 @@ class AjaxConsoleProxy(object):
for k in to_delete:
del AjaxConsoleProxy.tokens[k]
- utils.LoopingCall(consumer.fetch, enable_callbacks=True).start(0.1)
+ self.conn.consume_in_thread()
utils.LoopingCall(delete_expired_tokens).start(1)
if __name__ == '__main__':
@@ -142,3 +141,4 @@ if __name__ == '__main__':
server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
service.serve(server)
service.wait()
+ self.conn.close()
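
A minimal sketch (not part of the patch) of the connection-object RPC interface the hunk above switches to, replacing rpc.create_consumer() plus the utils.LoopingCall polling of consumer.fetch. The proxy class and topic name here are illustrative:

    from nova import rpc

    class ExampleProxy(object):
        # Methods on the proxy object are invoked for incoming messages.
        def ping(self, context, arg):
            return arg

    # Sketch only: open a connection, attach a topic consumer, and let the
    # connection consume messages in a background thread.
    conn = rpc.create_connection(new=True)
    conn.create_consumer('example_topic', ExampleProxy())
    conn.consume_in_thread()
    # ... run the service ...
    conn.close()
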
diff --git a/contrib/nova.sh b/contrib/nova.sh
index 7994e5133..16cddebd5 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -81,7 +81,7 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet
sudo apt-get install -y python-novaclient python-glance python-cheetah
sudo apt-get install -y python-carrot python-tempita python-sqlalchemy
- sudo apt-get install -y python-suds
+ sudo apt-get install -y python-suds python-kombu
if [ "$USE_IPV6" == 1 ]; then
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9aebf92e3..fe44191c8 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -47,6 +47,7 @@ from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils
from nova.compute import instance_types
+from nova.compute import vm_states
from nova.image import s3
@@ -78,6 +79,30 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
+# EC2 API can return the following values as documented in the EC2 API
+# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
+# ApiReference-ItemType-InstanceStateType.html
+# pending | running | shutting-down | terminated | stopping | stopped
+_STATE_DESCRIPTION_MAP = {
+ None: 'pending',
+ vm_states.ACTIVE: 'running',
+ vm_states.BUILDING: 'pending',
+ vm_states.REBUILDING: 'pending',
+ vm_states.DELETED: 'terminated',
+ vm_states.STOPPED: 'stopped',
+ vm_states.MIGRATING: 'migrate',
+ vm_states.RESIZING: 'resize',
+ vm_states.PAUSED: 'pause',
+ vm_states.SUSPENDED: 'suspend',
+ vm_states.RESCUED: 'rescue',
+}
+
+
+def state_description_from_vm_state(vm_state):
+ """Map the vm state to the server status string"""
+ return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
+
+
# TODO(yamahata): hypervisor dependent default device name
_DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
@@ -1039,11 +1064,12 @@ class CloudController(object):
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
- state_description = instance['state_description']
- state_to_value = {'stopping': 'stop',
- 'stopped': 'stop',
- 'terminating': 'terminate'}
- value = state_to_value.get(state_description)
+ vm_state = instance['vm_state']
+ state_to_value = {
+ vm_states.STOPPED: 'stopped',
+ vm_states.DELETED: 'terminated',
+ }
+ value = state_to_value.get(vm_state)
if value:
result['instanceInitiatedShutdownBehavior'] = value
@@ -1198,8 +1224,8 @@ class CloudController(object):
self._format_kernel_id(instance, i, 'kernelId')
self._format_ramdisk_id(instance, i, 'ramdiskId')
i['instanceState'] = {
- 'code': instance['state'],
- 'name': instance['state_description']}
+ 'code': instance['power_state'],
+ 'name': state_description_from_vm_state(instance['vm_state'])}
fixed_addr = None
floating_addr = None
if instance['fixed_ips']:
@@ -1618,22 +1644,22 @@ class CloudController(object):
# stop the instance if necessary
restart_instance = False
if not no_reboot:
- state_description = instance['state_description']
+ vm_state = instance['vm_state']
# if the instance is in subtle state, refuse to proceed.
- if state_description not in ('running', 'stopping', 'stopped'):
+ if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
- if state_description == 'running':
+ if vm_state == vm_states.ACTIVE:
restart_instance = True
self.compute_api.stop(context, instance_id=instance_id)
# wait instance for really stopped
start_time = time.time()
- while state_description != 'stopped':
+ while vm_state != vm_states.STOPPED:
time.sleep(1)
instance = self.compute_api.get(context, instance_id)
- state_description = instance['state_description']
+ vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
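
A self-contained sketch (not part of the patch) of how the EC2 instanceState element is now derived: the status name comes from vm_state via the map above, and the numeric code is the raw power_state. The instance dict and string literals below are illustrative stand-ins for the vm_states constants:

    _STATE_DESCRIPTION_MAP = {
        None: 'pending',
        'active': 'running',       # vm_states.ACTIVE
        'building': 'pending',     # vm_states.BUILDING
        'deleted': 'terminated',   # vm_states.DELETED
        'stopped': 'stopped',      # vm_states.STOPPED
    }

    def state_description_from_vm_state(vm_state):
        # Unmapped states fall through unchanged (dict.get default).
        return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)

    instance = {'power_state': 1, 'vm_state': 'active'}
    instance_state = {'code': instance['power_state'],
                      'name': state_description_from_vm_state(instance['vm_state'])}
    # instance_state == {'code': 1, 'name': 'running'}
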
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index d9eb832f2..d743a66ef 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -27,7 +27,8 @@ from nova import flags
from nova import log as logging
from nova import quota
from nova.api.openstack import wsgi
-from nova.compute import power_state as compute_power_state
+from nova.compute import vm_states
+from nova.compute import task_states
LOG = logging.getLogger('nova.api.openstack.common')
@@ -38,36 +39,61 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
-_STATUS_MAP = {
- None: 'BUILD',
- compute_power_state.NOSTATE: 'BUILD',
- compute_power_state.RUNNING: 'ACTIVE',
- compute_power_state.BLOCKED: 'ACTIVE',
- compute_power_state.SUSPENDED: 'SUSPENDED',
- compute_power_state.PAUSED: 'PAUSED',
- compute_power_state.SHUTDOWN: 'SHUTDOWN',
- compute_power_state.SHUTOFF: 'SHUTOFF',
- compute_power_state.CRASHED: 'ERROR',
- compute_power_state.FAILED: 'ERROR',
- compute_power_state.BUILDING: 'BUILD',
+_STATE_MAP = {
+ vm_states.ACTIVE: {
+ 'default': 'ACTIVE',
+ task_states.REBOOTING: 'REBOOT',
+ task_states.UPDATING_PASSWORD: 'PASSWORD',
+ task_states.RESIZE_VERIFY: 'VERIFY_RESIZE',
+ },
+ vm_states.BUILDING: {
+ 'default': 'BUILD',
+ },
+ vm_states.REBUILDING: {
+ 'default': 'REBUILD',
+ },
+ vm_states.STOPPED: {
+ 'default': 'STOPPED',
+ },
+ vm_states.MIGRATING: {
+ 'default': 'MIGRATING',
+ },
+ vm_states.RESIZING: {
+ 'default': 'RESIZE',
+ },
+ vm_states.PAUSED: {
+ 'default': 'PAUSED',
+ },
+ vm_states.SUSPENDED: {
+ 'default': 'SUSPENDED',
+ },
+ vm_states.RESCUED: {
+ 'default': 'RESCUE',
+ },
+ vm_states.ERROR: {
+ 'default': 'ERROR',
+ },
+ vm_states.DELETED: {
+ 'default': 'DELETED',
+ },
}
-def status_from_power_state(power_state):
- """Map the power state to the server status string"""
- return _STATUS_MAP[power_state]
+def status_from_state(vm_state, task_state='default'):
+ """Given vm_state and task_state, return a status string."""
+ task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE'))
+ status = task_map.get(task_state, task_map['default'])
+ LOG.debug("Generated %(status)s from vm_state=%(vm_state)s "
+ "task_state=%(task_state)s." % locals())
+ return status
-def power_states_from_status(status):
- """Map the server status string to a list of power states"""
- power_states = []
- for power_state, status_map in _STATUS_MAP.iteritems():
- # Skip the 'None' state
- if power_state is None:
- continue
- if status.lower() == status_map.lower():
- power_states.append(power_state)
- return power_states
+def vm_state_from_status(status):
+ """Map the server status string to a vm state."""
+ for state, task_map in _STATE_MAP.iteritems():
+ status_string = task_map.get("default")
+ if status.lower() == status_string.lower():
+ return state
def get_pagination_params(request):
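
A short usage sketch (not part of the patch), assuming the module paths above: a task-specific status wins when one is defined, otherwise the vm_state's default is used, and only the defaults are reversible:

    from nova.api.openstack import common
    from nova.compute import task_states
    from nova.compute import vm_states

    common.status_from_state(vm_states.ACTIVE)                          # 'ACTIVE'
    common.status_from_state(vm_states.ACTIVE, task_states.REBOOTING)   # 'REBOOT'
    common.status_from_state('not-a-state')                             # 'UNKNOWN_STATE'

    common.vm_state_from_status('ACTIVE')   # vm_states.ACTIVE
    common.vm_state_from_status('REBOOT')   # None -- not a 'default' status
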
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
index 40086f778..6ce531c8f 100644
--- a/nova/api/openstack/contrib/floating_ips.py
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -96,7 +96,8 @@ class FloatingIPController(object):
except rpc.RemoteError as ex:
# NOTE(tr3buchet) - why does this block exist?
if ex.exc_type == 'NoMoreFloatingIps':
- raise exception.NoMoreFloatingIps()
+ msg = _("No more floating ips available.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
else:
raise
@@ -138,7 +139,11 @@ class Floating_ips(extensions.ExtensionDescriptor):
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
- self.compute_api.associate_floating_ip(context, instance_id, address)
+ try:
+ self.compute_api.associate_floating_ip(context, instance_id,
+ address)
+ except exception.ApiError, e:
+ raise webob.exc.HTTPBadRequest(explanation=e.message)
return webob.Response(status_int=202)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 27c67e79e..e0dd9bdb1 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -95,17 +95,15 @@ class Controller(object):
search_opts['recurse_zones'] = utils.bool_from_str(
search_opts.get('recurse_zones', False))
- # If search by 'status', we need to convert it to 'state'
- # If the status is unknown, bail.
- # Leave 'state' in search_opts so compute can pass it on to
- # child zones..
+ # If search by 'status', we need to convert it to 'vm_state'
+ # to pass on to child zones.
if 'status' in search_opts:
status = search_opts['status']
- search_opts['state'] = common.power_states_from_status(status)
- if len(search_opts['state']) == 0:
+ state = common.vm_state_from_status(status)
+ if state is None:
reason = _('Invalid server status: %(status)s') % locals()
- LOG.error(reason)
raise exception.InvalidInput(reason=reason)
+ search_opts['vm_state'] = state
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
@@ -608,9 +606,8 @@ class ControllerV10(Controller):
try:
self.compute_api.rebuild(context, instance_id, image_id, password)
- except exception.BuildInProgress:
- msg = _("Instance %s is currently being rebuilt.") % instance_id
- LOG.debug(msg)
+ except exception.RebuildRequiresActiveInstance:
+ msg = _("Instance %s must be active to rebuild.") % instance_id
raise exc.HTTPConflict(explanation=msg)
return webob.Response(status_int=202)
@@ -750,9 +747,8 @@ class ControllerV11(Controller):
self.compute_api.rebuild(context, instance_id, image_href,
password, name=name, metadata=metadata,
files_to_inject=personalities)
- except exception.BuildInProgress:
- msg = _("Instance %s is currently being rebuilt.") % instance_id
- LOG.debug(msg)
+ except exception.RebuildRequiresActiveInstance:
+ msg = _("Instance %s must be active to rebuild.") % instance_id
raise exc.HTTPConflict(explanation=msg)
except exception.InstanceNotFound:
msg = _("Instance %s could not be found") % instance_id
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 0ec98591e..b0daeb7a8 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -21,13 +21,12 @@ import hashlib
import os
from nova import exception
-import nova.compute
-import nova.context
from nova.api.openstack import common
from nova.api.openstack.views import addresses as addresses_view
from nova.api.openstack.views import flavors as flavors_view
from nova.api.openstack.views import images as images_view
from nova import utils
+from nova.compute import vm_states
class ViewBuilder(object):
@@ -61,17 +60,13 @@ class ViewBuilder(object):
def _build_detail(self, inst):
"""Returns a detailed model of a server."""
+ vm_state = inst.get('vm_state', vm_states.BUILDING)
+ task_state = inst.get('task_state')
inst_dict = {
'id': inst['id'],
'name': inst['display_name'],
- 'status': common.status_from_power_state(inst.get('state'))}
-
- ctxt = nova.context.get_admin_context()
- compute_api = nova.compute.API()
-
- if compute_api.has_finished_migration(ctxt, inst['uuid']):
- inst_dict['status'] = 'RESIZE-CONFIRM'
+ 'status': common.status_from_state(vm_state, task_state)}
# Return the metadata as a dictionary
metadata = {}
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 205207d66..e045ef3de 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -35,6 +35,8 @@ from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -73,12 +75,18 @@ def generate_default_hostname(instance):
def _is_able_to_shutdown(instance, instance_id):
- states = {'terminating': "Instance %s is already being terminated",
- 'migrating': "Instance %s is being migrated",
- 'stopping': "Instance %s is being stopped"}
- msg = states.get(instance['state_description'])
- if msg:
- LOG.warning(_(msg), instance_id)
+ vm_state = instance["vm_state"]
+ task_state = instance["task_state"]
+
+ valid_shutdown_states = [
+ vm_states.ACTIVE,
+ vm_states.REBUILDING,
+ vm_states.BUILDING,
+ ]
+
+ if vm_state not in valid_shutdown_states:
+ LOG.warn(_("Instance %(instance_id)s is not in an 'active' state. It "
+ "is currently %(vm_state)s. Shutdown aborted.") % locals())
return False
return True
@@ -249,10 +257,10 @@ class API(base.Base):
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
+ 'power_state': power_state.NOSTATE,
+ 'vm_state': vm_states.BUILDING,
'config_drive_id': config_drive_id or '',
'config_drive': config_drive or '',
- 'state': 0,
- 'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
@@ -413,6 +421,8 @@ class API(base.Base):
updates['display_name'] = "Server %s" % instance_id
instance['display_name'] = updates['display_name']
updates['hostname'] = self.hostname_factory(instance)
+ updates['vm_state'] = vm_states.BUILDING
+ updates['task_state'] = task_states.SCHEDULING
instance = self.update(context, instance_id, **updates)
return instance
@@ -750,10 +760,8 @@ class API(base.Base):
return
self.update(context,
- instance['id'],
- state_description='terminating',
- state=0,
- terminated_at=utils.utcnow())
+ instance_id,
+ task_state=task_states.DELETING)
host = instance['host']
if host:
@@ -773,9 +781,9 @@ class API(base.Base):
return
self.update(context,
- instance['id'],
- state_description='stopping',
- state=power_state.NOSTATE,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.STOPPING,
terminated_at=utils.utcnow())
host = instance['host']
@@ -787,12 +795,18 @@ class API(base.Base):
"""Start an instance."""
LOG.debug(_("Going to try to start %s"), instance_id)
instance = self._get_instance(context, instance_id, 'starting')
- if instance['state_description'] != 'stopped':
- _state_description = instance['state_description']
+ vm_state = instance["vm_state"]
+
+ if vm_state != vm_states.STOPPED:
LOG.warning(_("Instance %(instance_id)s is not "
- "stopped(%(_state_description)s)") % locals())
+ "stopped. (%(vm_state)s)") % locals())
return
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.STOPPED,
+ task_state=task_states.STARTING)
+
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
@@ -1029,6 +1043,10 @@ class API(base.Base):
@scheduler_api.reroute_compute("reboot")
def reboot(self, context, instance_id):
"""Reboot the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING)
self._cast_compute_message('reboot_instance', context, instance_id)
@scheduler_api.reroute_compute("rebuild")
@@ -1036,21 +1054,25 @@ class API(base.Base):
name=None, metadata=None, files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
instance = self.db.instance_get(context, instance_id)
+ name = name or instance["display_name"]
- if instance["state"] == power_state.BUILDING:
- msg = _("Instance already building")
- raise exception.BuildInProgress(msg)
+ if instance["vm_state"] != vm_states.ACTIVE:
+ msg = _("Instance must be active to rebuild.")
+ raise exception.RebuildRequiresActiveInstance(msg)
files_to_inject = files_to_inject or []
+ metadata = metadata or {}
+
self._check_injected_file_quota(context, files_to_inject)
+ self._check_metadata_properties_quota(context, metadata)
- values = {"image_ref": image_href}
- if metadata is not None:
- self._check_metadata_properties_quota(context, metadata)
- values['metadata'] = metadata
- if name is not None:
- values['display_name'] = name
- self.db.instance_update(context, instance_id, values)
+ self.update(context,
+ instance_id,
+ metadata=metadata,
+ display_name=name,
+ image_ref=image_href,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBUILDING)
rebuild_params = {
"new_pass": admin_password,
@@ -1074,6 +1096,11 @@ class API(base.Base):
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('revert_resize', context,
instance_ref['uuid'],
@@ -1094,6 +1121,12 @@ class API(base.Base):
if not migration_ref:
raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
status='finished')
+
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('confirm_resize', context,
instance_ref['uuid'],
@@ -1139,6 +1172,11 @@ class API(base.Base):
if (current_memory_mb == new_memory_mb) and flavor_id:
raise exception.CannotResizeToSameSize()
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.RESIZING,
+ task_state=task_states.RESIZE_PREP)
+
instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
{"method": "prep_resize",
@@ -1172,11 +1210,19 @@ class API(base.Base):
@scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
"""Pause the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.PAUSING)
self._cast_compute_message('pause_instance', context, instance_id)
@scheduler_api.reroute_compute("unpause")
def unpause(self, context, instance_id):
"""Unpause the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.PAUSED,
+ task_state=task_states.UNPAUSING)
self._cast_compute_message('unpause_instance', context, instance_id)
def _call_compute_message_for_host(self, action, context, host, params):
@@ -1209,21 +1255,37 @@ class API(base.Base):
@scheduler_api.reroute_compute("suspend")
def suspend(self, context, instance_id):
"""Suspend the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.SUSPENDING)
self._cast_compute_message('suspend_instance', context, instance_id)
@scheduler_api.reroute_compute("resume")
def resume(self, context, instance_id):
"""Resume the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.SUSPENDED,
+ task_state=task_states.RESUMING)
self._cast_compute_message('resume_instance', context, instance_id)
@scheduler_api.reroute_compute("rescue")
def rescue(self, context, instance_id):
"""Rescue the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESCUING)
self._cast_compute_message('rescue_instance', context, instance_id)
@scheduler_api.reroute_compute("unrescue")
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.RESCUED,
+ task_state=task_states.UNRESCUING)
self._cast_compute_message('unrescue_instance', context, instance_id)
@scheduler_api.reroute_compute("set_admin_password")
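
Taken together, the compute API changes above follow one pattern: each action records its intent as a (vm_state, task_state) pair before casting to the compute manager, and the manager clears task_state when the work finishes. A sketch (not part of the patch) of the reboot lifecycle as the instance row evolves:

    #   step                          vm_state   task_state   power_state
    #   ---------------------------   --------   ----------   --------------------
    #   compute.api.API.reboot()      active     rebooting    (unchanged)
    #   manager.reboot_instance()     active     rebooting    read from hypervisor
    #   after driver.reboot()         active     None         read from hypervisor
    #
    # While task_state is 'rebooting' the OpenStack API reports the server
    # status as 'REBOOT' (see _STATE_MAP in nova/api/openstack/common.py);
    # once it is cleared the status reads 'ACTIVE' again.
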
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6fcb3786c..0477db745 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -56,6 +56,8 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
from nova.notifier import api as notifier
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -146,6 +148,10 @@ class ComputeManager(manager.SchedulerDependentManager):
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
+ def _instance_update(self, context, instance_id, **kwargs):
+ """Update an instance in the database using kwargs as value."""
+ return self.db.instance_update(context, instance_id, kwargs)
+
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
@@ -153,8 +159,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instances = self.db.instance_get_all_by_host(context, self.host)
for instance in instances:
inst_name = instance['name']
- db_state = instance['state']
- drv_state = self._update_state(context, instance['id'])
+ db_state = instance['power_state']
+ drv_state = self._get_power_state(context, instance)
expect_running = db_state == power_state.RUNNING \
and drv_state != db_state
@@ -177,29 +183,13 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warning(_('Hypervisor driver does not '
'support firewall rules'))
- def _update_state(self, context, instance_id, state=None):
- """Update the state of an instance from the driver info."""
- instance_ref = self.db.instance_get(context, instance_id)
-
- if state is None:
- try:
- LOG.debug(_('Checking state of %s'), instance_ref['name'])
- info = self.driver.get_info(instance_ref['name'])
- except exception.NotFound:
- info = None
-
- if info is not None:
- state = info['state']
- else:
- state = power_state.FAILED
-
- self.db.instance_set_state(context, instance_id, state)
- return state
-
- def _update_launched_at(self, context, instance_id, launched_at=None):
- """Update the launched_at parameter of the given instance."""
- data = {'launched_at': launched_at or utils.utcnow()}
- self.db.instance_update(context, instance_id, data)
+ def _get_power_state(self, context, instance):
+ """Retrieve the power state for the given instance."""
+ LOG.debug(_('Checking state of %s'), instance['name'])
+ try:
+ return self.driver.get_info(instance['name'])["state"]
+ except exception.NotFound:
+ return power_state.FAILED
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
@@ -251,11 +241,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'block_device_mapping')
-
volume_api = volume.API()
block_device_mapping = []
swap = None
@@ -389,17 +374,12 @@ class ComputeManager(manager.SchedulerDependentManager):
updates = {}
updates['host'] = self.host
updates['launched_on'] = self.host
- instance = self.db.instance_update(context,
- instance_id,
- updates)
+ updates['vm_state'] = vm_states.BUILDING
+ updates['task_state'] = task_states.NETWORKING
+ instance = self.db.instance_update(context, instance_id, updates)
instance['injected_files'] = kwargs.get('injected_files', [])
instance['admin_pass'] = kwargs.get('admin_password', None)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'networking')
-
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# NOTE(vish): This could be a cast because we don't do anything
@@ -418,6 +398,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# all vif creation and network injection, maybe this is correct
network_info = []
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+
(swap, ephemerals,
block_device_mapping) = self._setup_block_device_mapping(
context, instance_id)
@@ -427,9 +412,12 @@ class ComputeManager(manager.SchedulerDependentManager):
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
- # TODO(vish) check to make sure the availability zone matches
- self._update_state(context, instance_id, power_state.BUILDING)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.SPAWNING)
+ # TODO(vish) check to make sure the availability zone matches
try:
self.driver.spawn(context, instance,
network_info, block_device_info)
@@ -438,13 +426,21 @@ class ComputeManager(manager.SchedulerDependentManager):
"virtualization enabled in the BIOS? Details: "
"%(ex)s") % locals()
LOG.exception(msg)
+ return
+
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.create',
notifier.INFO, usage_info)
+
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -480,8 +476,7 @@ class ComputeManager(manager.SchedulerDependentManager):
for volume in volumes:
self._detach_volume(context, instance_id, volume['id'], False)
- if (instance['state'] == power_state.SHUTOFF and
- instance['state_description'] != 'stopped'):
+ if instance['power_state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
@@ -496,9 +491,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance = self.db.instance_get(context.elevated(), instance_id)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.DELETED,
+ task_state=None,
+ terminated_at=utils.utcnow())
- # TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
+
usage_info = utils.usage_from_instance(instance)
notifier.notify('compute.%s' % self.host,
'compute.instance.delete',
@@ -509,7 +509,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
- # instance state will be updated to stopped by _poll_instance_states()
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.STOPPED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -529,26 +532,46 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Rebuilding instance %s"), instance_id, context=context)
- self._update_state(context, instance_id, power_state.BUILDING)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.REBUILDING,
+ task_state=None)
network_info = self._get_instance_nw_info(context, instance_ref)
-
self.driver.destroy(instance_ref, network_info)
+
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.REBUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+
instance_ref.injected_files = kwargs.get('injected_files', [])
network_info = self.network_api.get_instance_nw_info(context,
instance_ref)
bd_mapping = self._setup_block_device_mapping(context, instance_id)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.REBUILDING,
+ task_state=task_states.SPAWNING)
+
# pull in new password here since the original password isn't in the db
instance_ref.admin_pass = kwargs.get('new_pass',
utils.generate_password(FLAGS.password_length))
self.driver.spawn(context, instance_ref, network_info, bd_mapping)
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
- usage_info = utils.usage_from_instance(instance_ref)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=utils.utcnow())
+ usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.rebuild',
notifier.INFO,
@@ -558,26 +581,34 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
+ LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
context = context.elevated()
- self._update_state(context, instance_id)
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
- if instance_ref['state'] != power_state.RUNNING:
- state = instance_ref['state']
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING)
+
+ if instance_ref['power_state'] != power_state.RUNNING:
+ state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals(),
context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rebooting')
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.reboot(instance_ref, network_info)
- self._update_state(context, instance_id)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def snapshot_instance(self, context, instance_id, image_id,
@@ -593,37 +624,45 @@ class ComputeManager(manager.SchedulerDependentManager):
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
+ if image_type == "snapshot":
+ task_state = task_states.IMAGE_SNAPSHOT
+ elif image_type == "backup":
+ task_state = task_states.IMAGE_BACKUP
+ else:
+ raise Exception(_('Image type not recognized %s') % image_type)
+
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- #NOTE(sirp): update_state currently only refreshes the state field
- # if we add is_snapshotting, we will need this refreshed too,
- # potentially?
- self._update_state(context, instance_id)
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_state)
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
- if instance_ref['state'] != power_state.RUNNING:
- state = instance_ref['state']
+
+ if instance_ref['power_state'] != power_state.RUNNING:
+ state = instance_ref['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals())
self.driver.snapshot(context, instance_ref, image_id)
+ self._instance_update(context, instance_id, task_state=None)
+
+ if image_type == 'snapshot' and rotation:
+ raise exception.ImageRotationNotAllowed()
+
+ elif image_type == 'backup' and rotation:
+ instance_uuid = instance_ref['uuid']
+ self.rotate_backups(context, instance_uuid, backup_type, rotation)
- if image_type == 'snapshot':
- if rotation:
- raise exception.ImageRotationNotAllowed()
elif image_type == 'backup':
- if rotation:
- instance_uuid = instance_ref['uuid']
- self.rotate_backups(context, instance_uuid, backup_type,
- rotation)
- else:
- raise exception.RotationRequiredForBackup()
- else:
- raise Exception(_('Image type not recognized %s') % image_type)
+ raise exception.RotationRequiredForBackup()
def rotate_backups(self, context, instance_uuid, backup_type, rotation):
"""Delete excess backups associated to an instance.
@@ -691,7 +730,7 @@ class ComputeManager(manager.SchedulerDependentManager):
for i in xrange(max_tries):
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref["id"]
- instance_state = instance_ref["state"]
+ instance_state = instance_ref["power_state"]
expected_state = power_state.RUNNING
if instance_state != expected_state:
@@ -726,7 +765,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
- instance_state = instance_ref['state']
+ instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to inject a file into a non-running '
@@ -744,7 +783,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref['id']
- instance_state = instance_ref['state']
+ instance_state = instance_ref['power_state']
expected_state = power_state.RUNNING
if instance_state != expected_state:
LOG.warn(_('trying to update agent on a non-running '
@@ -759,40 +798,41 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
+ LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
- _update_state = lambda result: self._update_state_callback(
- self, context, instance_id, result)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.rescue(context, instance_ref, _update_state, network_info)
- self._update_state(context, instance_id)
+
+ # NOTE(blamar): None of the virt drivers use the 'callback' param
+ self.driver.rescue(context, instance_ref, None, network_info)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.RESCUED,
+ task_state=None,
+ power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
+ LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- _update_state = lambda result: self._update_state_callback(
- self, context, instance_id, result)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.unrescue(instance_ref, _update_state, network_info)
- self._update_state(context, instance_id)
- @staticmethod
- def _update_state_callback(self, context, instance_id, result):
- """Update instance state when async task completes."""
- self._update_state(context, instance_id)
+ # NOTE(blamar): None of the virt drivers use the 'callback' param
+ self.driver.unrescue(instance_ref, None, network_info)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -851,11 +891,12 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
- self.db.instance_update(context, instance_ref['uuid'],
- dict(memory_mb=instance_type['memory_mb'],
- vcpus=instance_type['vcpus'],
- local_gb=instance_type['local_gb'],
- instance_type_id=instance_type['id']))
+ self._instance_update(context,
+ instance_ref["uuid"],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb'],
+ instance_type_id=instance_type['id'])
self.driver.revert_migration(instance_ref)
self.db.migration_update(context, migration_id,
@@ -882,8 +923,11 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get_by_uuid(context, instance_id)
if instance_ref['host'] == FLAGS.host:
- raise exception.Error(_(
- 'Migration error: destination same as source!'))
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ERROR)
+ msg = _('Migration error: destination same as source!')
+ raise exception.Error(msg)
old_instance_type = self.db.instance_type_get(context,
instance_ref['instance_type_id'])
@@ -977,6 +1021,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.finish_migration(context, instance_ref, disk_info,
network_info, resize_instance)
+ self._instance_update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_VERIFY)
+
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@@ -1008,35 +1057,35 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
+ LOG.audit(_('instance %s: pausing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: pausing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'pausing')
- self.driver.pause(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.pause(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.PAUSED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this host."""
+ LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: unpausing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unpausing')
- self.driver.unpause(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.unpause(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_power_action(self, context, host=None, action=None):
@@ -1052,7 +1101,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
- if instance_ref["state"] == power_state.RUNNING:
+ if instance_ref["power_state"] == power_state.RUNNING:
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
return self.driver.get_diagnostics(instance_ref)
@@ -1061,33 +1110,35 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def suspend_instance(self, context, instance_id):
"""Suspend the given instance."""
+ LOG.audit(_('instance %s: suspending'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: suspending'), instance_id, context=context)
- self.db.instance_set_state(context, instance_id,
- power_state.NOSTATE,
- 'suspending')
- self.driver.suspend(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.suspend(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.SUSPENDED,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def resume_instance(self, context, instance_id):
"""Resume the given suspended instance."""
+ LOG.audit(_('instance %s: resuming'), instance_id, context=context)
context = context.elevated()
+
instance_ref = self.db.instance_get(context, instance_id)
- LOG.audit(_('instance %s: resuming'), instance_id, context=context)
- self.db.instance_set_state(context, instance_id,
- power_state.NOSTATE,
- 'resuming')
- self.driver.resume(instance_ref,
- lambda result: self._update_state_callback(self,
- context,
- instance_id,
- result))
+ self.driver.resume(instance_ref, lambda result: None)
+
+ current_power_state = self._get_power_state(context, instance_ref)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def lock_instance(self, context, instance_id):
@@ -1498,11 +1549,14 @@ class ComputeManager(manager.SchedulerDependentManager):
'block_migration': block_migration}})
# Restore instance state
- self.db.instance_update(ctxt,
- instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': dest})
+ current_power_state = self._get_power_state(ctxt, instance_ref)
+ self._instance_update(ctxt,
+ instance_ref["id"],
+ host=dest,
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
+
# Restore volume state
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
@@ -1548,11 +1602,11 @@ class ComputeManager(manager.SchedulerDependentManager):
This param specifies destination host.
"""
host = instance_ref['host']
- self.db.instance_update(context,
- instance_ref['id'],
- {'state_description': 'running',
- 'state': power_state.RUNNING,
- 'host': host})
+ self._instance_update(context,
+ instance_ref['id'],
+ host=host,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
@@ -1600,10 +1654,9 @@ class ComputeManager(manager.SchedulerDependentManager):
error_list.append(ex)
try:
- self._poll_instance_states(context)
+ self._sync_power_states(context)
except Exception as ex:
- LOG.warning(_("Error during instance poll: %s"),
- unicode(ex))
+ LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
error_list.append(ex)
return error_list
@@ -1618,68 +1671,40 @@ class ComputeManager(manager.SchedulerDependentManager):
self.update_service_capabilities(
self.driver.get_host_stats(refresh=True))
- def _poll_instance_states(self, context):
+ def _sync_power_states(self, context):
+ """Align power states between the database and the hypervisor.
+
+ The hypervisor is authoritative for the power_state data, so we
+ simply loop over all known instances for this host and update the
+ power_state according to the hypervisor. If the instance is not found
+ then it will be set to power_state.NOSTATE, because it doesn't exist
+ on the hypervisor.
+
+ """
vm_instances = self.driver.list_instances_detail()
vm_instances = dict((vm.name, vm) for vm in vm_instances)
+ db_instances = self.db.instance_get_all_by_host(context, self.host)
- # Keep a list of VMs not in the DB, cross them off as we find them
- vms_not_found_in_db = list(vm_instances.keys())
+ num_vm_instances = len(vm_instances)
+ num_db_instances = len(db_instances)
- db_instances = self.db.instance_get_all_by_host(context, self.host)
+ if num_vm_instances != num_db_instances:
+ LOG.info(_("Found %(num_db_instances)s in the database and "
+ "%(num_vm_instances)s on the hypervisor.") % locals())
for db_instance in db_instances:
- name = db_instance['name']
- db_state = db_instance['state']
+ name = db_instance["name"]
+ db_power_state = db_instance['power_state']
vm_instance = vm_instances.get(name)
if vm_instance is None:
- # NOTE(justinsb): We have to be very careful here, because a
- # concurrent operation could be in progress (e.g. a spawn)
- if db_state == power_state.BUILDING:
- # TODO(justinsb): This does mean that if we crash during a
- # spawn, the machine will never leave the spawning state,
- # but this is just the way nova is; this function isn't
- # trying to correct that problem.
- # We could have a separate task to correct this error.
- # TODO(justinsb): What happens during a live migration?
- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
- "State=%(db_state)s, so assuming spawn is in "
- "progress.") % locals())
- vm_state = db_state
- else:
- LOG.info(_("Found instance '%(name)s' in DB but no VM. "
- "State=%(db_state)s, so setting state to "
- "shutoff.") % locals())
- vm_state = power_state.SHUTOFF
- if db_instance['state_description'] == 'stopping':
- self.db.instance_stop(context, db_instance['id'])
- continue
+ vm_power_state = power_state.NOSTATE
else:
- vm_state = vm_instance.state
- vms_not_found_in_db.remove(name)
-
- if (db_instance['state_description'] in ['migrating', 'stopping']):
- # A situation which db record exists, but no instance"
- # sometimes occurs while live-migration at src compute,
- # this case should be ignored.
- LOG.debug(_("Ignoring %(name)s, as it's currently being "
- "migrated.") % locals())
- continue
-
- if vm_state != db_state:
- LOG.info(_("DB/VM state mismatch. Changing state from "
- "'%(db_state)s' to '%(vm_state)s'") % locals())
- self._update_state(context, db_instance['id'], vm_state)
+ vm_power_state = vm_instance.state
- # NOTE(justinsb): We no longer auto-remove SHUTOFF instances
- # It's quite hard to get them back when we do.
-
- # Are there VMs not in the DB?
- for vm_not_found_in_db in vms_not_found_in_db:
- name = vm_not_found_in_db
+ if vm_power_state == db_power_state:
+ continue
- # We only care about instances that compute *should* know about
- if name.startswith("instance-"):
- # TODO(justinsb): What to do here? Adopt it? Shut it down?
- LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
- % locals())
+ self._instance_update(context,
+ db_instance["id"],
+ power_state=vm_power_state)
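
A self-contained sketch (not part of the patch) of the comparison at the heart of the new _sync_power_states(): hypervisor data wins for power_state, an instance missing from the hypervisor maps to NOSTATE (0 below), and vm_state/task_state are no longer touched by the periodic task:

    import collections

    FakeVM = collections.namedtuple('FakeVM', ['name', 'state'])

    def sync_power_states(db_instances, hypervisor_instances, nostate=0):
        """Yield (instance_id, new_power_state) for rows needing an update."""
        by_name = dict((vm.name, vm) for vm in hypervisor_instances)
        for inst in db_instances:
            vm = by_name.get(inst['name'])
            new_state = vm.state if vm is not None else nostate
            if new_state != inst['power_state']:
                yield inst['id'], new_state

    # instance-2 has disappeared from the hypervisor, so it drops to NOSTATE:
    db = [{'id': 1, 'name': 'instance-1', 'power_state': 1},
          {'id': 2, 'name': 'instance-2', 'power_state': 1}]
    hv = [FakeVM('instance-1', 1)]
    assert list(sync_power_states(db, hv)) == [(2, 0)]
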
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
new file mode 100644
index 000000000..e3315a542
--- /dev/null
+++ b/nova/compute/task_states.py
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible task states for instances.
+
+Compute instance task states represent what is happening to the instance at the
+current moment. These tasks can be generic, such as 'spawning', or specific,
+such as 'block_device_mapping'. These task states allow for a better view into
+what an instance is doing and should be displayed to users/administrators as
+necessary.
+
+"""
+
+SCHEDULING = 'scheduling'
+BLOCK_DEVICE_MAPPING = 'block_device_mapping'
+NETWORKING = 'networking'
+SPAWNING = 'spawning'
+
+IMAGE_SNAPSHOT = 'image_snapshot'
+IMAGE_BACKUP = 'image_backup'
+
+UPDATING_PASSWORD = 'updating_password'
+
+RESIZE_PREP = 'resize_prep'
+RESIZE_MIGRATING = 'resize_migrating'
+RESIZE_MIGRATED = 'resize_migrated'
+RESIZE_FINISH = 'resize_finish'
+RESIZE_REVERTING = 'resize_reverting'
+RESIZE_CONFIRMING = 'resize_confirming'
+RESIZE_VERIFY = 'resize_verify'
+
+REBUILDING = 'rebuilding'
+
+REBOOTING = 'rebooting'
+PAUSING = 'pausing'
+UNPAUSING = 'unpausing'
+SUSPENDING = 'suspending'
+RESUMING = 'resuming'
+
+RESCUING = 'rescuing'
+UNRESCUING = 'unrescuing'
+
+DELETING = 'deleting'
+STOPPING = 'stopping'
+STARTING = 'starting'
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
new file mode 100644
index 000000000..6f16c1f09
--- /dev/null
+++ b/nova/compute/vm_states.py
@@ -0,0 +1,39 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible vm states for instances.
+
+Compute instance vm states represent the state of an instance as it pertains to
+a user or administrator. When combined with task states (task_states.py), a
+better picture can be formed regarding the instance's health.
+
+"""
+
+ACTIVE = 'active'
+BUILDING = 'building'
+REBUILDING = 'rebuilding'
+
+PAUSED = 'paused'
+SUSPENDED = 'suspended'
+RESCUED = 'rescued'
+DELETED = 'deleted'
+STOPPED = 'stopped'
+
+MIGRATING = 'migrating'
+RESIZING = 'resizing'
+
+ERROR = 'error'
diff --git a/nova/context.py b/nova/context.py
index b917a1d81..5c22641a0 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -38,7 +38,7 @@ class RequestContext(object):
self.roles = roles or []
self.is_admin = is_admin
if self.is_admin is None:
- self.admin = 'admin' in self.roles
+ self.is_admin = 'admin' in [x.lower() for x in self.roles]
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
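
A brief sketch (not part of the patch), assuming the RequestContext signature from nova/context.py: the old code stored the role check on self.admin, leaving is_admin as None for role-derived admins, and compared role names case-sensitively:

    from nova import context

    ctxt = context.RequestContext('fake-user', 'fake-project', roles=['Admin'])
    assert ctxt.is_admin is True   # previously None, and 'Admin' did not match
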
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index d76dc22ed..c97ff5070 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -28,6 +28,7 @@ from nova import flags
from nova import ipv6
from nova import utils
from nova import log as logging
+from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
@@ -1102,12 +1103,11 @@ def instance_destroy(context, instance_id):
def instance_stop(context, instance_id):
session = get_session()
with session.begin():
- from nova.compute import power_state
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'host': None,
- 'state': power_state.SHUTOFF,
- 'state_description': 'stopped',
+ 'vm_state': vm_states.STOPPED,
+ 'task_state': None,
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
@@ -1266,7 +1266,7 @@ def instance_get_all_by_filters(context, filters):
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
- 'state', 'instance_type_id', 'deleted']
+ 'vm_state', 'instance_type_id', 'deleted']
query_filters = [key for key in filters.iterkeys()
if key in exact_match_filter_names]
@@ -1503,18 +1503,6 @@ def instance_get_floating_address(context, instance_id):
return fixed_ip_refs[0].floating_ips[0]['address']
-@require_admin_context
-def instance_set_state(context, instance_id, state, description=None):
- # TODO(devcamcar): Move this out of models and into driver
- from nova.compute import power_state
- if not description:
- description = power_state.name(state)
- db.instance_update(context,
- instance_id,
- {'state': state,
- 'state_description': description})
-
-
@require_context
def instance_update(context, instance_id, values):
session = get_session()
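
A short sketch (not part of the patch) of the updated exact-match filtering, assuming an admin context: the servers index now passes a single vm_state string instead of a list of power_state codes:

    from nova import context as nova_context
    from nova import db

    ctxt = nova_context.get_admin_context()
    filters = {'project_id': 'fake-project', 'vm_state': 'active', 'deleted': False}
    instances = db.instance_get_all_by_filters(ctxt, filters)
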
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
new file mode 100644
index 000000000..e58ae5362
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
@@ -0,0 +1,138 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+from sqlalchemy import MetaData, Table, Column, String
+
+from nova.compute import task_states
+from nova.compute import vm_states
+
+
+meta = MetaData()
+
+
+c_task_state = Column('task_state',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+_upgrade_translations = {
+ "stopping": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": task_states.STOPPING,
+ },
+ "stopped": {
+ "state_description": vm_states.STOPPED,
+ "task_state": None,
+ },
+ "terminated": {
+ "state_description": vm_states.DELETED,
+ "task_state": None,
+ },
+ "terminating": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": task_states.DELETING,
+ },
+ "running": {
+ "state_description": vm_states.ACTIVE,
+ "task_state": None,
+ },
+ "scheduling": {
+ "state_description": vm_states.BUILDING,
+ "task_state": task_states.SCHEDULING,
+ },
+ "migrating": {
+ "state_description": vm_states.MIGRATING,
+ "task_state": None,
+ },
+ "pending": {
+ "state_description": vm_states.BUILDING,
+ "task_state": task_states.SCHEDULING,
+ },
+}
+
+
+_downgrade_translations = {
+ vm_states.ACTIVE: {
+ None: "running",
+ task_states.DELETING: "terminating",
+ task_states.STOPPING: "stopping",
+ },
+ vm_states.BUILDING: {
+ None: "pending",
+ task_states.SCHEDULING: "scheduling",
+ },
+ vm_states.STOPPED: {
+ None: "stopped",
+ },
+ vm_states.REBUILDING: {
+ None: "pending",
+ },
+ vm_states.DELETED: {
+ None: "terminated",
+ },
+ vm_states.MIGRATING: {
+ None: "migrating",
+ },
+}
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instance_table = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ c_state = instance_table.c.state
+ c_state.alter(name='power_state')
+
+ c_vm_state = instance_table.c.state_description
+ c_vm_state.alter(name='vm_state')
+
+ instance_table.create_column(c_task_state)
+
+ for old_state, values in _upgrade_translations.iteritems():
+ instance_table.update().\
+ values(**values).\
+ where(c_vm_state == old_state).\
+ execute()
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instance_table = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ c_task_state = instance_table.c.task_state
+
+ c_state = instance_table.c.power_state
+ c_state.alter(name='state')
+
+ c_vm_state = instance_table.c.vm_state
+ c_vm_state.alter(name='state_description')
+
+ for old_vm_state, old_task_states in _downgrade_translations.iteritems():
+ for old_task_state, new_state_desc in old_task_states.iteritems():
+ instance_table.update().\
+ where(c_task_state == old_task_state).\
+ where(c_vm_state == old_vm_state).\
+ values(vm_state=new_state_desc).\
+ execute()
+
+ instance_table.drop_column('task_state')
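
The translation tables in this migration amount to a pair of lookups: each legacy state_description becomes a (vm_state, task_state) pair on upgrade, and the pair collapses back to a single string on downgrade. A standalone sketch of the same idea, with plain strings assumed in place of the vm_states/task_states constants:

    # Assumed string values standing in for the nova.compute constants.
    UPGRADE = {
        'running':     ('active', None),
        'stopped':     ('stopped', None),
        'stopping':    ('active', 'stopping'),
        'terminated':  ('deleted', None),
        'terminating': ('active', 'deleting'),
        'scheduling':  ('building', 'scheduling'),
        'pending':     ('building', 'scheduling'),
        'migrating':   ('migrating', None),
    }

    def upgrade_row(old_state_description):
        """Return the (vm_state, task_state) pair for a legacy row."""
        return UPGRADE[old_state_description]

    def downgrade_row(vm_state, task_state):
        """Reverse the mapping; ambiguous pairs take the first match."""
        for old, pair in UPGRADE.items():
            if pair == (vm_state, task_state):
                return old
        raise LookupError('no translation for %r/%r' % (vm_state, task_state))

    print(upgrade_row('terminating'))      # ('active', 'deleting')
    print(downgrade_row('stopped', None))  # stopped
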
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index f8feb0b4f..854034f12 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -193,8 +193,9 @@ class Instance(BASE, NovaBase):
key_name = Column(String(255))
key_data = Column(Text)
- state = Column(Integer)
- state_description = Column(String(255))
+ power_state = Column(Integer)
+ vm_state = Column(String(255))
+ task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
@@ -238,17 +239,6 @@ class Instance(BASE, NovaBase):
access_ip_v4 = Column(String(255))
access_ip_v6 = Column(String(255))
- # TODO(vish): see Ewan's email about state improvements, probably
- # should be in a driver base class or some such
- # vmstate_state = running, halted, suspended, paused
- # power_state = what we have
- # task_state = transitory and may trigger power state transition
-
- #@validates('state')
- #def validate_state(self, key, state):
- # assert(state in ['nostate', 'running', 'blocked', 'paused',
- # 'shutdown', 'shutoff', 'crashed'])
-
class VirtualStorageArray(BASE, NovaBase):
"""
diff --git a/nova/exception.py b/nova/exception.py
index 32981f4d5..fca4586c3 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -61,7 +61,7 @@ class ApiError(Error):
super(ApiError, self).__init__(outstr)
-class BuildInProgress(Error):
+class RebuildRequiresActiveInstance(Error):
pass
@@ -533,6 +533,10 @@ class NoMoreFloatingIps(FloatingIpNotFound):
message = _("Zero floating ips available.")
+class FloatingIpAlreadyInUse(NovaException):
+ message = _("Floating ip %(address)s already in use by %(fixed_ip)s.")
+
+
class NoFloatingIpsDefined(NotFound):
message = _("Zero floating ips exist.")
diff --git a/nova/flags.py b/nova/flags.py
index a5951ebc8..aa76defe5 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -303,8 +303,12 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
-DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
-DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
+DEFINE_integer('rabbit_retry_interval', 1,
+ 'rabbit connection retry interval to start')
+DEFINE_integer('rabbit_retry_backoff', 2,
+ 'rabbit connection retry backoff in seconds')
+DEFINE_integer('rabbit_max_retries', 0,
+ 'maximum rabbit connection attempts (0=try forever)')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues')
DEFINE_list('enabled_apis', ['ec2', 'osapi'],
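
Under the new defaults the reconnect wait starts at rabbit_retry_interval and grows by rabbit_retry_backoff on each attempt, and a rabbit_max_retries of 0 means the loop never gives up; the 30-second ceiling comes from the RPC backends below rather than from a flag. A quick sketch of the resulting wait schedule:

    def retry_schedule(attempts, interval_start=1, backoff=2, interval_max=30):
        """Sleep times (seconds) before each reconnection attempt after the first."""
        sleeps = []
        sleep = interval_start
        for _ in range(attempts):
            sleeps.append(min(sleep, interval_max))
            sleep += backoff
        return sleeps

    # With the new defaults the waits grow 1, 3, 5, ... and cap at 30 seconds.
    print(retry_schedule(6))   # [1, 3, 5, 7, 9, 11]
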
diff --git a/nova/network/api.py b/nova/network/api.py
index d04474df3..78580d360 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -111,6 +111,12 @@ class API(base.Base):
'(%(project)s)') %
{'address': floating_ip['address'],
'project': context.project_id})
+
+        # If this address has previously been associated with a
+        # different instance, disassociate the floating_ip first
+ if floating_ip['fixed_ip'] and floating_ip['fixed_ip'] is not fixed_ip:
+ self.disassociate_floating_ip(context, floating_ip['address'])
+
# NOTE(vish): if we are multi_host, send to the instances host
if fixed_ip['network']['multi_host']:
host = fixed_ip['instance']['host']
diff --git a/nova/network/manager.py b/nova/network/manager.py
index b4605eea5..e6b30d1a0 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -280,6 +280,13 @@ class FloatingIP(object):
def associate_floating_ip(self, context, floating_address, fixed_address):
"""Associates an floating ip to a fixed ip."""
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ floating_address)
+ if floating_ip['fixed_ip']:
+ raise exception.FloatingIpAlreadyInUse(
+ address=floating_ip['address'],
+ fixed_ip=floating_ip['fixed_ip']['address'])
+
self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address)
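
Taken together with the nova/network/api.py hunk above, the intent appears to be: the manager refuses to bind an address that is already associated, and the API layer disassociates first when the caller re-targets an address. A toy, in-memory sketch of that interaction (no real database or RPC involved):

    # Toy in-memory "db"; illustration only.
    FLOATING_IPS = {'10.10.10.10': {'fixed_ip': None}}

    def associate_floating_ip(floating_address, fixed_address):
        """Manager-side guard: refuse to re-bind an address already in use."""
        floating_ip = FLOATING_IPS[floating_address]
        if floating_ip['fixed_ip']:
            raise Exception('%s already in use by %s'
                            % (floating_address, floating_ip['fixed_ip']))
        floating_ip['fixed_ip'] = fixed_address

    def api_associate(floating_address, fixed_address):
        """API-side behaviour: disassociate first if bound elsewhere."""
        floating_ip = FLOATING_IPS[floating_address]
        if floating_ip['fixed_ip'] and floating_ip['fixed_ip'] != fixed_address:
            floating_ip['fixed_ip'] = None    # disassociate_floating_ip(...)
        associate_floating_ip(floating_address, fixed_address)

    api_associate('10.10.10.10', '10.0.0.3')
    api_associate('10.10.10.10', '10.0.0.4')         # silently moves the address
    print(FLOATING_IPS['10.10.10.10']['fixed_ip'])   # 10.0.0.4
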
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index 6ef4a050e..043838536 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -122,4 +122,5 @@ def notify(publisher_id, event_type, priority, payload):
driver.notify(msg)
except Exception, e:
LOG.exception(_("Problem '%(e)s' attempting to "
- "send to notification system." % locals()))
+ "send to notification system. Payload=%(payload)s" %
+ locals()))
diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py
index bdf7f705b..c0cfdd5ce 100644
--- a/nova/rpc/__init__.py
+++ b/nova/rpc/__init__.py
@@ -23,44 +23,35 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('rpc_backend',
- 'nova.rpc.amqp',
- "The messaging module to use, defaults to AMQP.")
+ 'nova.rpc.impl_kombu',
+ "The messaging module to use, defaults to kombu.")
-RPCIMPL = import_object(FLAGS.rpc_backend)
+_RPCIMPL = None
-def create_connection(new=True):
- return RPCIMPL.Connection.instance(new=True)
-
+def get_impl():
+ """Delay import of rpc_backend until FLAGS are loaded."""
+ global _RPCIMPL
+ if _RPCIMPL is None:
+ _RPCIMPL = import_object(FLAGS.rpc_backend)
+ return _RPCIMPL
-def create_consumer(conn, topic, proxy, fanout=False):
- if fanout:
- return RPCIMPL.FanoutAdapterConsumer(
- connection=conn,
- topic=topic,
- proxy=proxy)
- else:
- return RPCIMPL.TopicAdapterConsumer(
- connection=conn,
- topic=topic,
- proxy=proxy)
-
-def create_consumer_set(conn, consumers):
- return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers)
+def create_connection(new=True):
+ return get_impl().create_connection(new=new)
def call(context, topic, msg):
- return RPCIMPL.call(context, topic, msg)
+ return get_impl().call(context, topic, msg)
def cast(context, topic, msg):
- return RPCIMPL.cast(context, topic, msg)
+ return get_impl().cast(context, topic, msg)
def fanout_cast(context, topic, msg):
- return RPCIMPL.fanout_cast(context, topic, msg)
+ return get_impl().fanout_cast(context, topic, msg)
def multicall(context, topic, msg):
- return RPCIMPL.multicall(context, topic, msg)
+ return get_impl().multicall(context, topic, msg)
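
The facade now imports the backend module on first use, so FLAGS.rpc_backend is honoured even though flags are parsed after import time. The same delay-the-import pattern in miniature, using the stdlib json module as a stand-in backend so the snippet runs on its own:

    import importlib

    _BACKEND = None

    def _get_impl(module_name='json'):
        """Import the configured backend lazily, after configuration is read."""
        global _BACKEND
        if _BACKEND is None:
            _BACKEND = importlib.import_module(module_name)
        return _BACKEND

    def dumps(obj):
        # Every public function delegates to the memoized backend module.
        return _get_impl().dumps(obj)

    print(dumps({'method': 'echo', 'args': {'value': 42}}))
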
diff --git a/nova/rpc/common.py b/nova/rpc/common.py
index 1d3065a83..b8c280630 100644
--- a/nova/rpc/common.py
+++ b/nova/rpc/common.py
@@ -1,8 +1,14 @@
from nova import exception
+from nova import flags
from nova import log as logging
LOG = logging.getLogger('nova.rpc')
+flags.DEFINE_integer('rpc_thread_pool_size', 1024,
+ 'Size of RPC thread pool')
+flags.DEFINE_integer('rpc_conn_pool_size', 30,
+ 'Size of RPC connection pool')
+
class RemoteError(exception.Error):
"""Signifies that a remote class has raised an exception.
diff --git a/nova/rpc/amqp.py b/nova/rpc/impl_carrot.py
index fe429b266..303a4ff88 100644
--- a/nova/rpc/amqp.py
+++ b/nova/rpc/impl_carrot.py
@@ -33,6 +33,7 @@ import uuid
from carrot import connection as carrot_connection
from carrot import messaging
+import eventlet
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
@@ -42,21 +43,22 @@ from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
-from nova import log as logging
-from nova import utils
from nova.rpc.common import RemoteError, LOG
+# Needed for tests
+eventlet.monkey_patch()
FLAGS = flags.FLAGS
-flags.DEFINE_integer('rpc_thread_pool_size', 1024,
- 'Size of RPC thread pool')
-flags.DEFINE_integer('rpc_conn_pool_size', 30,
- 'Size of RPC connection pool')
class Connection(carrot_connection.BrokerConnection):
"""Connection instance object."""
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+ self._rpc_consumers = []
+ self._rpc_consumer_thread = None
+
@classmethod
def instance(cls, new=True):
"""Returns the instance."""
@@ -94,13 +96,63 @@ class Connection(carrot_connection.BrokerConnection):
pass
return cls.instance()
+ def close(self):
+ self.cancel_consumer_thread()
+ for consumer in self._rpc_consumers:
+ try:
+ consumer.close()
+ except Exception:
+ # ignore all errors
+ pass
+ self._rpc_consumers = []
+ super(Connection, self).close()
+
+ def consume_in_thread(self):
+ """Consumer from all queues/consumers in a greenthread"""
+
+ consumer_set = ConsumerSet(connection=self,
+ consumer_list=self._rpc_consumers)
+
+ def _consumer_thread():
+ try:
+ consumer_set.wait()
+ except greenlet.GreenletExit:
+ return
+ if self._rpc_consumer_thread is None:
+ self._rpc_consumer_thread = eventlet.spawn(_consumer_thread)
+ return self._rpc_consumer_thread
+
+ def cancel_consumer_thread(self):
+ """Cancel a consumer thread"""
+ if self._rpc_consumer_thread is not None:
+ self._rpc_consumer_thread.kill()
+ try:
+ self._rpc_consumer_thread.wait()
+ except greenlet.GreenletExit:
+ pass
+ self._rpc_consumer_thread = None
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ """Create a consumer that calls methods in the proxy"""
+ if fanout:
+ consumer = FanoutAdapterConsumer(
+ connection=self,
+ topic=topic,
+ proxy=proxy)
+ else:
+ consumer = TopicAdapterConsumer(
+ connection=self,
+ topic=topic,
+ proxy=proxy)
+ self._rpc_consumers.append(consumer)
+
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
# TODO(comstud): Timeout connections not used in a while
def create(self):
- LOG.debug('Creating new connection')
+ LOG.debug('Pool creating new connection')
return Connection.instance(new=True)
# Create a ConnectionPool to use for RPC calls. We'll order the
@@ -119,25 +171,34 @@ class Consumer(messaging.Consumer):
"""
def __init__(self, *args, **kwargs):
- for i in xrange(FLAGS.rabbit_max_retries):
- if i > 0:
- time.sleep(FLAGS.rabbit_retry_interval)
+ max_retries = FLAGS.rabbit_max_retries
+ sleep_time = FLAGS.rabbit_retry_interval
+ tries = 0
+ while True:
+ tries += 1
+ if tries > 1:
+ time.sleep(sleep_time)
+ # backoff for next retry attempt.. if there is one
+ sleep_time += FLAGS.rabbit_retry_backoff
+ if sleep_time > 30:
+ sleep_time = 30
try:
super(Consumer, self).__init__(*args, **kwargs)
self.failed_connection = False
break
except Exception as e: # Catching all because carrot sucks
+ self.failed_connection = True
+ if max_retries > 0 and tries == max_retries:
+ break
fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port
- fl_intv = FLAGS.rabbit_retry_interval
+ fl_intv = sleep_time
LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is'
' unreachable: %(e)s. Trying again in %(fl_intv)d'
' seconds.') % locals())
- self.failed_connection = True
if self.failed_connection:
LOG.error(_('Unable to connect to AMQP server '
- 'after %d tries. Shutting down.'),
- FLAGS.rabbit_max_retries)
+ 'after %(tries)d tries. Shutting down.') % locals())
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -166,12 +227,6 @@ class Consumer(messaging.Consumer):
LOG.exception(_('Failed to fetch message from queue: %s' % e))
self.failed_connection = True
- def attach_to_eventlet(self):
- """Only needed for unit tests!"""
- timer = utils.LoopingCall(self.fetch, enable_callbacks=True)
- timer.start(0.1)
- return timer
-
class AdapterConsumer(Consumer):
"""Calls methods on a proxy object based on method and args."""
@@ -242,7 +297,7 @@ class AdapterConsumer(Consumer):
# NOTE(vish): this iterates through the generator
list(rval)
except Exception as e:
- logging.exception('Exception during message handling')
+ LOG.exception('Exception during message handling')
if msg_id:
msg_reply(msg_id, None, sys.exc_info())
return
@@ -520,6 +575,11 @@ class MulticallWaiter(object):
yield result
+def create_connection(new=True):
+ """Create a connection"""
+ return Connection.instance(new=new)
+
+
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response."""
rv = multicall(context, topic, msg)
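
consume_in_thread() and cancel_consumer_thread() follow a standard eventlet pattern: spawn a greenthread for the blocking consume loop, then kill it and wait, swallowing the GreenletExit it exits with. A standalone sketch of just that lifecycle (assumes eventlet and greenlet are installed; a sleep loop stands in for consumer_set.wait()):

    import eventlet
    import greenlet

    def _worker():
        # Stand-in for consumer_set.wait(): loop until killed.
        while True:
            eventlet.sleep(0.1)

    thread = eventlet.spawn(_worker)
    eventlet.sleep(0.3)          # let the worker run briefly

    # Shutdown mirrors cancel_consumer_thread(): kill, then wait, swallowing
    # the GreenletExit the killed greenthread exits with.
    thread.kill()
    try:
        thread.wait()
    except greenlet.GreenletExit:
        pass
    print('consumer thread stopped')
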
diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py
new file mode 100644
index 000000000..b994a6a10
--- /dev/null
+++ b/nova/rpc/impl_kombu.py
@@ -0,0 +1,781 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import kombu
+import kombu.entity
+import kombu.messaging
+import kombu.connection
+import itertools
+import sys
+import time
+import traceback
+import types
+import uuid
+
+import eventlet
+from eventlet import greenpool
+from eventlet import pools
+import greenlet
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova.rpc.common import RemoteError, LOG
+
+# Needed for tests
+eventlet.monkey_patch()
+
+FLAGS = flags.FLAGS
+
+
+class ConsumerBase(object):
+ """Consumer base class."""
+
+ def __init__(self, channel, callback, tag, **kwargs):
+ """Declare a queue on an amqp channel.
+
+ 'channel' is the amqp channel to use
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ queue name, exchange name, and other kombu options are
+ passed in here as a dictionary.
+ """
+ self.callback = callback
+ self.tag = str(tag)
+ self.kwargs = kwargs
+ self.queue = None
+ self.reconnect(channel)
+
+ def reconnect(self, channel):
+ """Re-declare the queue after a rabbit reconnect"""
+ self.channel = channel
+ self.kwargs['channel'] = channel
+ self.queue = kombu.entity.Queue(**self.kwargs)
+ self.queue.declare()
+
+ def consume(self, *args, **kwargs):
+ """Actually declare the consumer on the amqp channel. This will
+ start the flow of messages from the queue. Using the
+ Connection.iterconsume() iterator will process the messages,
+ calling the appropriate callback.
+
+ If a callback is specified in kwargs, use that. Otherwise,
+ use the callback passed during __init__()
+
+        If kwargs['nowait'] is False, the call blocks until the broker
+        confirms the consumer; it does not wait for a message to arrive.
+
+ Messages will automatically be acked if the callback doesn't
+ raise an exception
+ """
+
+ options = {'consumer_tag': self.tag}
+ options['nowait'] = kwargs.get('nowait', False)
+ callback = kwargs.get('callback', self.callback)
+ if not callback:
+ raise ValueError("No callback defined")
+
+ def _callback(raw_message):
+ message = self.channel.message_to_python(raw_message)
+ callback(message.payload)
+ message.ack()
+
+ self.queue.consume(*args, callback=_callback, **options)
+
+ def cancel(self):
+ """Cancel the consuming from the queue, if it has started"""
+ try:
+ self.queue.cancel(self.tag)
+ except KeyError, e:
+            # NOTE(comstud): Kludge to get around an amqplib bug
+ if str(e) != "u'%s'" % self.tag:
+ raise
+ self.queue = None
+
+
+class DirectConsumer(ConsumerBase):
+ """Queue/consumer class for 'direct'"""
+
+ def __init__(self, channel, msg_id, callback, tag, **kwargs):
+ """Init a 'direct' queue.
+
+ 'channel' is the amqp channel to use
+ 'msg_id' is the msg_id to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ # Default options
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(
+ name=msg_id,
+ type='direct',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(DirectConsumer, self).__init__(
+ channel,
+ callback,
+ tag,
+ name=msg_id,
+ exchange=exchange,
+ routing_key=msg_id,
+ **options)
+
+
+class TopicConsumer(ConsumerBase):
+ """Consumer class for 'topic'"""
+
+ def __init__(self, channel, topic, callback, tag, **kwargs):
+ """Init a 'topic' queue.
+
+ 'channel' is the amqp channel to use
+ 'topic' is the topic to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ # Default options
+ options = {'durable': FLAGS.rabbit_durable_queues,
+ 'auto_delete': False,
+ 'exclusive': False}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(
+ name=FLAGS.control_exchange,
+ type='topic',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(TopicConsumer, self).__init__(
+ channel,
+ callback,
+ tag,
+ name=topic,
+ exchange=exchange,
+ routing_key=topic,
+ **options)
+
+
+class FanoutConsumer(ConsumerBase):
+ """Consumer class for 'fanout'"""
+
+ def __init__(self, channel, topic, callback, tag, **kwargs):
+ """Init a 'fanout' queue.
+
+ 'channel' is the amqp channel to use
+ 'topic' is the topic to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ unique = uuid.uuid4().hex
+ exchange_name = '%s_fanout' % topic
+ queue_name = '%s_fanout_%s' % (topic, unique)
+
+ # Default options
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(
+ name=exchange_name,
+ type='fanout',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(FanoutConsumer, self).__init__(
+ channel,
+ callback,
+ tag,
+ name=queue_name,
+ exchange=exchange,
+ routing_key=topic,
+ **options)
+
+
+class Publisher(object):
+ """Base Publisher class"""
+
+ def __init__(self, channel, exchange_name, routing_key, **kwargs):
+ """Init the Publisher class with the exchange_name, routing_key,
+ and other options
+ """
+ self.exchange_name = exchange_name
+ self.routing_key = routing_key
+ self.kwargs = kwargs
+ self.reconnect(channel)
+
+ def reconnect(self, channel):
+ """Re-establish the Producer after a rabbit reconnection"""
+ self.exchange = kombu.entity.Exchange(name=self.exchange_name,
+ **self.kwargs)
+ self.producer = kombu.messaging.Producer(exchange=self.exchange,
+ channel=channel, routing_key=self.routing_key)
+
+ def send(self, msg):
+ """Send a message"""
+ self.producer.publish(msg)
+
+
+class DirectPublisher(Publisher):
+ """Publisher class for 'direct'"""
+ def __init__(self, channel, msg_id, **kwargs):
+ """init a 'direct' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ super(DirectPublisher, self).__init__(channel,
+ msg_id,
+ msg_id,
+ type='direct',
+ **options)
+
+
+class TopicPublisher(Publisher):
+ """Publisher class for 'topic'"""
+ def __init__(self, channel, topic, **kwargs):
+ """init a 'topic' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+ options = {'durable': FLAGS.rabbit_durable_queues,
+ 'auto_delete': False,
+ 'exclusive': False}
+ options.update(kwargs)
+ super(TopicPublisher, self).__init__(channel,
+ FLAGS.control_exchange,
+ topic,
+ type='topic',
+ **options)
+
+
+class FanoutPublisher(Publisher):
+ """Publisher class for 'fanout'"""
+ def __init__(self, channel, topic, **kwargs):
+ """init a 'fanout' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ super(FanoutPublisher, self).__init__(channel,
+ '%s_fanout' % topic,
+ None,
+ type='fanout',
+ **options)
+
+
+class Connection(object):
+ """Connection object."""
+
+ def __init__(self):
+ self.consumers = []
+ self.consumer_thread = None
+ self.max_retries = FLAGS.rabbit_max_retries
+ # Try forever?
+ if self.max_retries <= 0:
+ self.max_retries = None
+ self.interval_start = FLAGS.rabbit_retry_interval
+ self.interval_stepping = FLAGS.rabbit_retry_backoff
+ # max retry-interval = 30 seconds
+ self.interval_max = 30
+ self.memory_transport = False
+
+ self.params = dict(hostname=FLAGS.rabbit_host,
+ port=FLAGS.rabbit_port,
+ userid=FLAGS.rabbit_userid,
+ password=FLAGS.rabbit_password,
+ virtual_host=FLAGS.rabbit_virtual_host)
+ if FLAGS.fake_rabbit:
+ self.params['transport'] = 'memory'
+ self.memory_transport = True
+ else:
+ self.memory_transport = False
+ self.connection = None
+ self.reconnect()
+
+ def reconnect(self):
+ """Handles reconnecting and re-estblishing queues"""
+ if self.connection:
+ try:
+ self.connection.close()
+ except self.connection.connection_errors:
+ pass
+ time.sleep(1)
+ self.connection = kombu.connection.BrokerConnection(**self.params)
+ if self.memory_transport:
+ # Kludge to speed up tests.
+ self.connection.transport.polling_interval = 0.0
+ self.consumer_num = itertools.count(1)
+
+ try:
+ self.connection.ensure_connection(errback=self.connect_error,
+ max_retries=self.max_retries,
+ interval_start=self.interval_start,
+ interval_step=self.interval_stepping,
+ interval_max=self.interval_max)
+ except self.connection.connection_errors, e:
+ # We should only get here if max_retries is set. We'll go
+ # ahead and exit in this case.
+ err_str = str(e)
+ max_retries = self.max_retries
+ LOG.error(_('Unable to connect to AMQP server '
+ 'after %(max_retries)d tries: %(err_str)s') % locals())
+ sys.exit(1)
+ LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' %
+ self.params))
+ self.channel = self.connection.channel()
+ # work around 'memory' transport bug in 1.1.3
+ if self.memory_transport:
+ self.channel._new_queue('ae.undeliver')
+ for consumer in self.consumers:
+ consumer.reconnect(self.channel)
+ if self.consumers:
+ LOG.debug(_("Re-established AMQP queues"))
+
+ def get_channel(self):
+ """Convenience call for bin/clear_rabbit_queues"""
+ return self.channel
+
+ def connect_error(self, exc, interval):
+ """Callback when there are connection re-tries by kombu"""
+ info = self.params.copy()
+ info['intv'] = interval
+ info['e'] = exc
+ LOG.error(_('AMQP server on %(hostname)s:%(port)d is'
+ ' unreachable: %(e)s. Trying again in %(intv)d'
+ ' seconds.') % info)
+
+ def close(self):
+ """Close/release this connection"""
+ self.cancel_consumer_thread()
+ self.connection.release()
+ self.connection = None
+
+ def reset(self):
+ """Reset a connection so it can be used again"""
+ self.cancel_consumer_thread()
+ self.channel.close()
+ self.channel = self.connection.channel()
+ # work around 'memory' transport bug in 1.1.3
+ if self.memory_transport:
+ self.channel._new_queue('ae.undeliver')
+ self.consumers = []
+
+ def declare_consumer(self, consumer_cls, topic, callback):
+ """Create a Consumer using the class that was passed in and
+ add it to our list of consumers
+ """
+ consumer = consumer_cls(self.channel, topic, callback,
+ self.consumer_num.next())
+ self.consumers.append(consumer)
+ return consumer
+
+ def iterconsume(self, limit=None):
+ """Return an iterator that will consume from all queues/consumers"""
+ while True:
+ try:
+ queues_head = self.consumers[:-1]
+ queues_tail = self.consumers[-1]
+ for queue in queues_head:
+ queue.consume(nowait=True)
+ queues_tail.consume(nowait=False)
+
+ for iteration in itertools.count(0):
+ if limit and iteration >= limit:
+ raise StopIteration
+ yield self.connection.drain_events()
+ except self.connection.connection_errors, e:
+ LOG.exception(_('Failed to consume message from queue: '
+ '%s' % str(e)))
+ self.reconnect()
+
+ def cancel_consumer_thread(self):
+ """Cancel a consumer thread"""
+ if self.consumer_thread is not None:
+ self.consumer_thread.kill()
+ try:
+ self.consumer_thread.wait()
+ except greenlet.GreenletExit:
+ pass
+ self.consumer_thread = None
+
+ def publisher_send(self, cls, topic, msg):
+ """Send to a publisher based on the publisher class"""
+ while True:
+ publisher = None
+ try:
+ publisher = cls(self.channel, topic)
+ publisher.send(msg)
+ return
+ except self.connection.connection_errors, e:
+ LOG.exception(_('Failed to publish message %s' % str(e)))
+ try:
+ self.reconnect()
+ if publisher:
+ publisher.reconnect(self.channel)
+ except self.connection.connection_errors, e:
+ pass
+
+ def declare_direct_consumer(self, topic, callback):
+ """Create a 'direct' queue.
+ In nova's use, this is generally a msg_id queue used for
+        responses to call/multicall.
+ """
+ self.declare_consumer(DirectConsumer, topic, callback)
+
+ def declare_topic_consumer(self, topic, callback=None):
+ """Create a 'topic' consumer."""
+ self.declare_consumer(TopicConsumer, topic, callback)
+
+ def declare_fanout_consumer(self, topic, callback):
+ """Create a 'fanout' consumer"""
+ self.declare_consumer(FanoutConsumer, topic, callback)
+
+ def direct_send(self, msg_id, msg):
+ """Send a 'direct' message"""
+ self.publisher_send(DirectPublisher, msg_id, msg)
+
+ def topic_send(self, topic, msg):
+ """Send a 'topic' message"""
+ self.publisher_send(TopicPublisher, topic, msg)
+
+ def fanout_send(self, topic, msg):
+ """Send a 'fanout' message"""
+ self.publisher_send(FanoutPublisher, topic, msg)
+
+ def consume(self, limit=None):
+ """Consume from all queues/consumers"""
+ it = self.iterconsume(limit=limit)
+ while True:
+ try:
+ it.next()
+ except StopIteration:
+ return
+
+ def consume_in_thread(self):
+ """Consumer from all queues/consumers in a greenthread"""
+ def _consumer_thread():
+ try:
+ self.consume()
+ except greenlet.GreenletExit:
+ return
+ if self.consumer_thread is None:
+ self.consumer_thread = eventlet.spawn(_consumer_thread)
+ return self.consumer_thread
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ """Create a consumer that calls a method in a proxy object"""
+ if fanout:
+ self.declare_fanout_consumer(topic, ProxyCallback(proxy))
+ else:
+ self.declare_topic_consumer(topic, ProxyCallback(proxy))
+
+
+class Pool(pools.Pool):
+ """Class that implements a Pool of Connections."""
+
+ # TODO(comstud): Timeout connections not used in a while
+ def create(self):
+ LOG.debug('Pool creating new connection')
+ return Connection()
+
+# Create a ConnectionPool to use for RPC calls. We'll order the
+# pool as a stack (LIFO), so that we can potentially loop through and
+# timeout old unused connections at some point
+ConnectionPool = Pool(
+ max_size=FLAGS.rpc_conn_pool_size,
+ order_as_stack=True)
+
+
+class ConnectionContext(object):
+ """The class that is actually returned to the caller of
+    create_connection(). This is essentially a wrapper around
+    Connection that supports 'with' and can return a new Connection or
+    one from a pool. It also notices when an instance of this class is
+    about to be deleted, so that Connections are returned to the pool on
+    exceptions and so forth without requiring the caller to catch every
+    exception and remember to return the connection to the pool.
+ """
+
+ def __init__(self, pooled=True):
+ """Create a new connection, or get one from the pool"""
+ self.connection = None
+ if pooled:
+ self.connection = ConnectionPool.get()
+ else:
+ self.connection = Connection()
+ self.pooled = pooled
+
+ def __enter__(self):
+ """with ConnectionContext() should return self"""
+ return self
+
+ def _done(self):
+ """If the connection came from a pool, clean it up and put it back.
+ If it did not come from a pool, close it.
+ """
+ if self.connection:
+ if self.pooled:
+ # Reset the connection so it's ready for the next caller
+ # to grab from the pool
+ self.connection.reset()
+ ConnectionPool.put(self.connection)
+ else:
+ try:
+ self.connection.close()
+ except Exception:
+ # There's apparently a bug in kombu 'memory' transport
+ # which causes an assert failure.
+ # But, we probably want to ignore all exceptions when
+ # trying to close a connection, anyway...
+ pass
+ self.connection = None
+
+ def __exit__(self, t, v, tb):
+ """end of 'with' statement. We're done here."""
+ self._done()
+
+ def __del__(self):
+ """Caller is done with this connection. Make sure we cleaned up."""
+ self._done()
+
+ def close(self):
+ """Caller is done with this connection."""
+ self._done()
+
+ def __getattr__(self, key):
+ """Proxy all other calls to the Connection instance"""
+ if self.connection:
+ return getattr(self.connection, key)
+ else:
+ raise exception.InvalidRPCConnectionReuse()
+
+
+class ProxyCallback(object):
+ """Calls methods on a proxy object based on method and args."""
+
+ def __init__(self, proxy):
+ self.proxy = proxy
+ self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
+
+ def __call__(self, message_data):
+ """Consumer callback to call a method on a proxy object.
+
+ Parses the message for validity and fires off a thread to call the
+ proxy object method.
+
+ Message data should be a dictionary with two keys:
+ method: string representing the method to call
+ args: dictionary of arg: value
+
+ Example: {'method': 'echo', 'args': {'value': 42}}
+
+ """
+ LOG.debug(_('received %s') % message_data)
+ ctxt = _unpack_context(message_data)
+ method = message_data.get('method')
+ args = message_data.get('args', {})
+ if not method:
+ LOG.warn(_('no method for message: %s') % message_data)
+ ctxt.reply(_('No method for message: %s') % message_data)
+ return
+ self.pool.spawn_n(self._process_data, ctxt, method, args)
+
+ @exception.wrap_exception()
+ def _process_data(self, ctxt, method, args):
+ """Thread that maigcally looks for a method on the proxy
+ object and calls it.
+ """
+
+ node_func = getattr(self.proxy, str(method))
+ node_args = dict((str(k), v) for k, v in args.iteritems())
+ # NOTE(vish): magic is fun!
+ try:
+ rval = node_func(context=ctxt, **node_args)
+ # Check if the result was a generator
+ if isinstance(rval, types.GeneratorType):
+ for x in rval:
+ ctxt.reply(x, None)
+ else:
+ ctxt.reply(rval, None)
+ # This final None tells multicall that it is done.
+ ctxt.reply(None, None)
+ except Exception as e:
+ LOG.exception('Exception during message handling')
+ ctxt.reply(None, sys.exc_info())
+ return
+
+
+def _unpack_context(msg):
+ """Unpack context from msg."""
+ context_dict = {}
+ for key in list(msg.keys()):
+ # NOTE(vish): Some versions of python don't like unicode keys
+ # in kwargs.
+ key = str(key)
+ if key.startswith('_context_'):
+ value = msg.pop(key)
+ context_dict[key[9:]] = value
+ context_dict['msg_id'] = msg.pop('_msg_id', None)
+ LOG.debug(_('unpacked context: %s'), context_dict)
+ return RpcContext.from_dict(context_dict)
+
+
+def _pack_context(msg, context):
+ """Pack context into msg.
+
+ Values for message keys need to be less than 255 chars, so we pull
+ context out into a bunch of separate keys. If we want to support
+ more arguments in rabbit messages, we may want to do the same
+ for args at some point.
+
+ """
+ context_d = dict([('_context_%s' % key, value)
+ for (key, value) in context.to_dict().iteritems()])
+ msg.update(context_d)
+
+
+class RpcContext(context.RequestContext):
+ """Context that supports replying to a rpc.call"""
+ def __init__(self, *args, **kwargs):
+ msg_id = kwargs.pop('msg_id', None)
+ self.msg_id = msg_id
+ super(RpcContext, self).__init__(*args, **kwargs)
+
+ def reply(self, *args, **kwargs):
+ if self.msg_id:
+ msg_reply(self.msg_id, *args, **kwargs)
+
+
+class MulticallWaiter(object):
+ def __init__(self, connection):
+ self._connection = connection
+ self._iterator = connection.iterconsume()
+ self._result = None
+ self._done = False
+
+ def done(self):
+ self._done = True
+ self._connection.close()
+
+ def __call__(self, data):
+ """The consume() callback will call this. Store the result."""
+ if data['failure']:
+ self._result = RemoteError(*data['failure'])
+ else:
+ self._result = data['result']
+
+ def __iter__(self):
+ """Return a result until we get a 'None' response from consumer"""
+ if self._done:
+ raise StopIteration
+ while True:
+ self._iterator.next()
+ result = self._result
+ if isinstance(result, Exception):
+ self.done()
+ raise result
+ if result == None:
+ self.done()
+ raise StopIteration
+ yield result
+
+
+def create_connection(new=True):
+ """Create a connection"""
+ return ConnectionContext(pooled=not new)
+
+
+def multicall(context, topic, msg):
+ """Make a call that returns multiple times."""
+ # Can't use 'with' for multicall, as it returns an iterator
+ # that will continue to use the connection. When it's done,
+ # connection.close() will get called which will put it back into
+ # the pool
+ LOG.debug(_('Making asynchronous call on %s ...'), topic)
+ msg_id = uuid.uuid4().hex
+ msg.update({'_msg_id': msg_id})
+ LOG.debug(_('MSG_ID is %s') % (msg_id))
+ _pack_context(msg, context)
+
+ conn = ConnectionContext()
+ wait_msg = MulticallWaiter(conn)
+ conn.declare_direct_consumer(msg_id, wait_msg)
+ conn.topic_send(topic, msg)
+
+ return wait_msg
+
+
+def call(context, topic, msg):
+ """Sends a message on a topic and wait for a response."""
+ rv = multicall(context, topic, msg)
+ # NOTE(vish): return the last result from the multicall
+ rv = list(rv)
+ if not rv:
+ return
+ return rv[-1]
+
+
+def cast(context, topic, msg):
+ """Sends a message on a topic without waiting for a response."""
+ LOG.debug(_('Making asynchronous cast on %s...'), topic)
+ _pack_context(msg, context)
+ with ConnectionContext() as conn:
+ conn.topic_send(topic, msg)
+
+
+def fanout_cast(context, topic, msg):
+ """Sends a message on a fanout exchange without waiting for a response."""
+ LOG.debug(_('Making asynchronous fanout cast...'))
+ _pack_context(msg, context)
+ with ConnectionContext() as conn:
+ conn.fanout_send(topic, msg)
+
+
+def msg_reply(msg_id, reply=None, failure=None):
+ """Sends a reply or an error on the channel signified by msg_id.
+
+ Failure should be a sys.exc_info() tuple.
+
+ """
+ with ConnectionContext() as conn:
+ if failure:
+ message = str(failure[1])
+ tb = traceback.format_exception(*failure)
+ LOG.error(_("Returning exception %s to caller"), message)
+ LOG.error(tb)
+ failure = (failure[0].__name__, str(failure[1]), tb)
+
+ try:
+ msg = {'result': reply, 'failure': failure}
+ except TypeError:
+ msg = {'result': dict((k, repr(v))
+ for k, v in reply.__dict__.iteritems()),
+ 'failure': failure}
+ conn.direct_send(msg_id, msg)
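
ConnectionContext is the piece that makes pooling safe for callers: the object handed back by create_connection() checks a Connection out of an eventlet pool and guarantees it is reset and returned, whether the caller uses 'with', calls close(), or simply lets it be garbage-collected. A reduced sketch of that checkout/return cycle with a fake connection instead of a live broker (assumes eventlet is installed):

    from eventlet import pools

    class FakeConnection(object):
        """Stand-in for the kombu-backed Connection; pooling only."""
        def reset(self):
            pass

    class ConnectionPool(pools.Pool):
        def create(self):
            return FakeConnection()

    POOL = ConnectionPool(max_size=4, order_as_stack=True)

    class ConnectionContext(object):
        """Check a connection out of the pool and guarantee its return."""
        def __init__(self):
            self.connection = POOL.get()

        def __enter__(self):
            return self

        def _done(self):
            if self.connection is not None:
                self.connection.reset()      # ready for the next caller
                POOL.put(self.connection)
                self.connection = None

        def __exit__(self, exc_type, exc_val, exc_tb):
            self._done()

        def __del__(self):
            self._done()

    with ConnectionContext() as ctx:
        print(ctx.connection)   # returned to the pool even if this block raises
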
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index f28353f05..22f4e14f9 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -30,6 +30,7 @@ from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import power_state
+from nova.compute import vm_states
from nova.api.ec2 import ec2utils
@@ -104,10 +105,8 @@ class Scheduler(object):
dest, block_migration)
# Changing instance_state.
- db.instance_set_state(context,
- instance_id,
- power_state.PAUSED,
- 'migrating')
+ values = {"vm_state": vm_states.MIGRATING}
+ db.instance_update(context, instance_id, values)
# Changing volume state
for volume_ref in instance_ref['volumes']:
@@ -129,8 +128,7 @@ class Scheduler(object):
"""
# Checking instance is running.
- if (power_state.RUNNING != instance_ref['state'] or \
- 'running' != instance_ref['state_description']):
+ if instance_ref['power_state'] != power_state.RUNNING:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
raise exception.InstanceNotRunning(instance_id=instance_id)
diff --git a/nova/service.py b/nova/service.py
index 959e79052..247eb4fb1 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -153,26 +153,15 @@ class Service(object):
self.topic)
# Share this same connection for these Consumers
- consumer_all = rpc.create_consumer(self.conn, self.topic, self,
- fanout=False)
+ self.conn.create_consumer(self.topic, self, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
- consumer_node = rpc.create_consumer(self.conn, node_topic, self,
- fanout=False)
+ self.conn.create_consumer(node_topic, self, fanout=False)
- fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True)
+ self.conn.create_consumer(self.topic, self, fanout=True)
- consumers = [consumer_all, consumer_node, fanout]
- consumer_set = rpc.create_consumer_set(self.conn, consumers)
-
- # Wait forever, processing these consumers
- def _wait():
- try:
- consumer_set.wait()
- finally:
- consumer_set.close()
-
- self.consumer_set_thread = eventlet.spawn(_wait)
+ # Consume from all consumers in a thread
+ self.conn.consume_in_thread()
if self.report_interval:
pulse = utils.LoopingCall(self.report_state)
@@ -237,10 +226,11 @@ class Service(object):
logging.warn(_('Service killed that has no database entry'))
def stop(self):
- self.consumer_set_thread.kill()
+        # Try to shut the connection down, but if we get any sort of
+        # error, go ahead and ignore it, as we're shutting down anyway
try:
- self.consumer_set_thread.wait()
- except greenlet.GreenletExit:
+ self.conn.close()
+ except Exception:
pass
for x in self.timers:
try:
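
The service now funnels all three of its consumers (the shared topic queue, the host-scoped queue, and the fanout queue) through one connection and a single consume_in_thread() call, so stop() only has to close that connection. A small standalone sketch of the wiring, with a fake connection object in place of the real RPC backend:

    class FakeConnection(object):
        """Records what Service.start() now asks of the rpc connection."""
        def __init__(self):
            self.consumers = []

        def create_consumer(self, topic, proxy, fanout=False):
            self.consumers.append((topic, 'fanout' if fanout else 'topic'))

        def consume_in_thread(self):
            print('consuming from: %s' % self.consumers)

        def close(self):
            print('connection closed; all consumers go away with it')

    def start(conn, topic='compute', host='host1'):
        conn.create_consumer(topic, proxy=None, fanout=False)
        conn.create_consumer('%s.%s' % (topic, host), proxy=None, fanout=False)
        conn.create_consumer(topic, proxy=None, fanout=True)
        conn.consume_in_thread()

    conn = FakeConnection()
    start(conn)
    conn.close()
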
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index 568faf867..fc10f2f6c 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -20,9 +20,11 @@ import webob
from nova import compute
from nova import context
from nova import db
-from nova import test
from nova import network
+from nova import rpc
+from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests.api.openstack import test_servers
from nova.api.openstack.contrib.floating_ips import FloatingIPController
@@ -60,10 +62,38 @@ def compute_api_associate(self, context, instance_id, floating_ip):
pass
+def network_api_associate(self, context, floating_ip, fixed_ip):
+ pass
+
+
def network_api_disassociate(self, context, floating_address):
pass
+def network_get_instance_nw_info(self, context, instance):
+ info = {
+ 'label': 'fake',
+ 'gateway': 'fake',
+ 'dhcp_server': 'fake',
+ 'broadcast': 'fake',
+ 'mac': 'fake',
+ 'vif_uuid': 'fake',
+ 'rxtx_cap': 'fake',
+ 'dns': [],
+ 'ips': [{'ip': '10.0.0.1'}],
+ 'should_create_bridge': False,
+ 'should_create_vlan': False}
+
+ return [['ignore', info]]
+
+
+def fake_instance_get(context, instance_id):
+ return {
+ "id": 1,
+ "user_id": 'fakeuser',
+ "project_id": '123'}
+
+
class FloatingIpTest(test.TestCase):
address = "10.10.10.10"
@@ -79,23 +109,21 @@ class FloatingIpTest(test.TestCase):
def setUp(self):
super(FloatingIpTest, self).setUp()
- self.controller = FloatingIPController()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "get_floating_ip_by_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "list_floating_ips",
network_api_list_floating_ips)
- self.stubs.Set(network.api.API, "allocate_floating_ip",
- network_api_allocate)
self.stubs.Set(network.api.API, "release_floating_ip",
network_api_release)
- self.stubs.Set(compute.api.API, "associate_floating_ip",
- compute_api_associate)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
+ self.stubs.Set(network.api.API, "get_instance_nw_info",
+ network_get_instance_nw_info)
+ self.stubs.Set(db.api, 'instance_get',
+ fake_instance_get)
+
self.context = context.get_admin_context()
self._create_floating_ip()
@@ -143,7 +171,20 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
self.assertEqual(res_dict['floating_ip']['instance_id'], None)
+ def test_floating_ip_allocate_no_free_ips(self):
+ def fake_call(*args, **kwargs):
+ raise(rpc.RemoteError('NoMoreFloatingIps', '', ''))
+
+ self.stubs.Set(rpc, "call", fake_call)
+ req = webob.Request.blank('/v1.1/123/os-floating-ips')
+ req.method = 'POST'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_floating_ip_allocate(self):
+ self.stubs.Set(network.api.API, "allocate_floating_ip",
+ network_api_allocate)
req = webob.Request.blank('/v1.1/123/os-floating-ips')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
@@ -165,6 +206,8 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_add_floating_ip_to_instance(self):
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ network_api_associate)
body = dict(addFloatingIp=dict(address='11.0.0.1'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
req.method = "POST"
@@ -174,6 +217,65 @@ class FloatingIpTest(test.TestCase):
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
+ def test_associate_floating_ip_to_instance_wrong_project_id(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ db.floating_ip_update(self.context, self.address, {'project_id': 'bad',
+ 'fixed_ip_id': 1})
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_associate_floating_ip_to_instance_no_project_id(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ db.floating_ip_update(self.context, self.address, {'project_id': None,
+ 'fixed_ip_id': 1})
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 400)
+
+ def test_add_associated_floating_ip_to_instance(self):
+ def fake_fixed_ip_get_by_address(ctx, address, session=None):
+ return {'address': address, 'network': {'multi_host': None,
+ 'host': 'fake'}}
+
+ self.disassociated = False
+
+ def fake_network_api_disassociate(local_self, ctx, floating_address):
+ self.disassociated = True
+
+ db.floating_ip_update(self.context, self.address, {'project_id': '123',
+ 'fixed_ip_id': 1})
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ fake_network_api_disassociate)
+ self.stubs.Set(db.api, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+
+ body = dict(addFloatingIp=dict(address=self.address))
+ req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+ req.method = "POST"
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 202)
+ self.assertTrue(self.disassociated)
+
def test_remove_floating_ip_from_instance(self):
body = dict(removeFloatingIp=dict(address='11.0.0.1'))
req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index c9c33abbd..0c5fab41e 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -10,8 +10,8 @@ from nova import utils
from nova import exception
from nova import flags
from nova.api.openstack import create_instance_helper
+from nova.compute import vm_states
from nova.compute import instance_types
-from nova.compute import power_state
import nova.db.api
from nova import test
from nova.tests.api.openstack import common
@@ -35,17 +35,19 @@ def return_server_with_attributes(**kwargs):
return _return_server
-def return_server_with_power_state(power_state):
- return return_server_with_attributes(power_state=power_state)
+def return_server_with_state(vm_state, task_state=None):
+ return return_server_with_attributes(vm_state=vm_state,
+ task_state=task_state)
-def return_server_with_uuid_and_power_state(power_state):
- return return_server_with_power_state(power_state)
-
+def return_server_with_uuid_and_state(vm_state, task_state=None):
+ def _return_server(context, id):
+ return return_server_with_state(vm_state, task_state)
+ return _return_server
-def stub_instance(id, power_state=0, metadata=None,
- image_ref="10", flavor_id="1", name=None):
+def stub_instance(id, metadata=None, image_ref="10", flavor_id="1",
+ name=None, vm_state=None, task_state=None):
if metadata is not None:
metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()]
else:
@@ -66,8 +68,8 @@ def stub_instance(id, power_state=0, metadata=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": power_state,
- "state_description": "",
+ "vm_state": vm_state or vm_states.ACTIVE,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -175,11 +177,11 @@ class ServerActionsTest(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
@@ -642,11 +644,11 @@ class ServerActionsTestV11(test.TestCase):
},
}
- state = power_state.BUILDING
- new_return_server = return_server_with_power_state(state)
+ state = vm_states.BUILDING
+ new_return_server = return_server_with_state(state)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_with_uuid_and_power_state(state))
+ return_server_with_uuid_and_state(state))
req = webob.Request.blank('/v1.1/fake/servers/1/action')
req.method = 'POST'
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index d065f48b6..19b530d99 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -37,7 +37,8 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.compute.api
from nova.compute import instance_types
-from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
import nova.db.api
import nova.scheduler.api
from nova.db.sqlalchemy.models import Instance
@@ -91,15 +92,18 @@ def return_server_with_addresses(private, public):
return _return_server
-def return_server_with_power_state(power_state):
+def return_server_with_state(vm_state, task_state=None):
def _return_server(context, id):
- return stub_instance(id, power_state=power_state)
+ return stub_instance(id, vm_state=vm_state, task_state=task_state)
return _return_server
-def return_server_with_uuid_and_power_state(power_state):
+def return_server_with_uuid_and_state(vm_state, task_state):
def _return_server(context, id):
- return stub_instance(id, uuid=FAKE_UUID, power_state=power_state)
+ return stub_instance(id,
+ uuid=FAKE_UUID,
+ vm_state=vm_state,
+ task_state=task_state)
return _return_server
@@ -148,7 +152,8 @@ def instance_addresses(context, instance_id):
def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
- public_addresses=None, host=None, power_state=0,
+ public_addresses=None, host=None,
+ vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", interfaces=None, name=None,
access_ipv4=None, access_ipv6=None):
@@ -184,8 +189,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": power_state,
- "state_description": "",
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -494,7 +499,7 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -587,8 +592,8 @@ class ServersTest(test.TestCase):
},
]
new_return_server = return_server_with_attributes(
- interfaces=interfaces, power_state=1, image_ref=image_ref,
- flavor_id=flavor_id)
+ interfaces=interfaces, vm_state=vm_states.ACTIVE,
+ image_ref=image_ref, flavor_id=flavor_id)
self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
req = webob.Request.blank('/v1.1/fake/servers/1')
@@ -1209,9 +1214,8 @@ class ServersTest(test.TestCase):
def test_get_servers_allows_status_v1_1(self):
def fake_get_all(compute_self, context, search_opts=None):
self.assertNotEqual(search_opts, None)
- self.assertTrue('state' in search_opts)
- self.assertEqual(set(search_opts['state']),
- set([power_state.RUNNING, power_state.BLOCKED]))
+ self.assertTrue('vm_state' in search_opts)
+ self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE)
return [stub_instance(100)]
self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
@@ -1228,13 +1232,9 @@ class ServersTest(test.TestCase):
def test_get_servers_invalid_status_v1_1(self):
"""Test getting servers by invalid status"""
-
self.flags(allow_admin_api=False)
-
req = webob.Request.blank('/v1.1/fake/servers?status=running')
res = req.get_response(fakes.wsgi_app())
- # The following assert will fail if either of the asserts in
- # fake_get_all() fail
self.assertEqual(res.status_int, 400)
self.assertTrue(res.body.find('Invalid server status') > -1)
@@ -1738,6 +1738,7 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(16, len(server['adminPass']))
self.assertEqual(1, server['id'])
+ self.assertEqual("BUILD", server["status"])
self.assertEqual(0, server['progress'])
self.assertEqual('server_test', server['name'])
self.assertEqual(expected_flavor, server['flavor'])
@@ -2467,23 +2468,51 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 204)
self.assertEqual(self.server_delete_called, True)
- def test_shutdown_status(self):
- new_server = return_server_with_power_state(power_state.SHUTDOWN)
- self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTDOWN')
- def test_shutoff_status(self):
- new_server = return_server_with_power_state(power_state.SHUTOFF)
+class TestServerStatus(test.TestCase):
+
+ def _get_with_state(self, vm_state, task_state=None):
+ new_server = return_server_with_state(vm_state, task_state)
self.stubs.Set(nova.db.api, 'instance_get', new_server)
- req = webob.Request.blank('/v1.0/servers/1')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+ request = webob.Request.blank('/v1.0/servers/1')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(response.status_int, 200)
+ return json.loads(response.body)
+
+ def test_active(self):
+ response = self._get_with_state(vm_states.ACTIVE)
+ self.assertEqual(response['server']['status'], 'ACTIVE')
+
+ def test_reboot(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING)
+ self.assertEqual(response['server']['status'], 'REBOOT')
+
+ def test_rebuild(self):
+ response = self._get_with_state(vm_states.REBUILDING)
+ self.assertEqual(response['server']['status'], 'REBUILD')
+
+ def test_rebuild_error(self):
+ response = self._get_with_state(vm_states.ERROR)
+ self.assertEqual(response['server']['status'], 'ERROR')
+
+ def test_resize(self):
+ response = self._get_with_state(vm_states.RESIZING)
+ self.assertEqual(response['server']['status'], 'RESIZE')
+
+ def test_verify_resize(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.RESIZE_VERIFY)
+ self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+ def test_password_update(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.UPDATING_PASSWORD)
+ self.assertEqual(response['server']['status'], 'PASSWORD')
+
+ def test_stopped(self):
+ response = self._get_with_state(vm_states.STOPPED)
+ self.assertEqual(response['server']['status'], 'STOPPED')
class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):
@@ -3537,8 +3566,8 @@ class ServersViewBuilderV11Test(test.TestCase):
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": 0,
- "state_description": "",
+ "vm_state": vm_states.BUILDING,
+ "task_state": None,
"memory_mb": 0,
"vcpus": 0,
"local_gb": 0,
@@ -3683,7 +3712,7 @@ class ServersViewBuilderV11Test(test.TestCase):
def test_build_server_detail_active_status(self):
#set the power state of the instance to running
- self.instance['state'] = 1
+ self.instance['vm_state'] = vm_states.ACTIVE
image_bookmark = "http://localhost/images/5"
flavor_bookmark = "http://localhost/flavors/1"
expected_server = {
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index b9382038a..2cf604d06 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -28,6 +28,17 @@ LOG = logging.getLogger('nova.tests.integrated')
class ServersTest(integrated_helpers._IntegratedTestBase):
+ def _wait_for_creation(self, server):
+ retries = 0
+ while server['status'] == 'BUILD':
+ time.sleep(1)
+ server = self.api.get_server(server['id'])
+ print server
+ retries = retries + 1
+ if retries > 5:
+ break
+ return server
+
def test_get_servers(self):
"""Simple check that listing servers works."""
servers = self.api.get_servers()
@@ -36,9 +47,9 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_delete_server(self):
"""Creates and deletes a server."""
+ self.flags(stub_network=True)
# Create server
-
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
@@ -91,19 +102,11 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
server_ids = [server['id'] for server in servers]
self.assertTrue(created_server_id in server_ids)
- # Wait (briefly) for creation
- retries = 0
- while found_server['status'] == 'build':
- LOG.debug("found server: %s" % found_server)
- time.sleep(1)
- found_server = self.api.get_server(created_server_id)
- retries = retries + 1
- if retries > 5:
- break
+ found_server = self._wait_for_creation(found_server)
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
- #self.assertEqual('available', found_server['status'])
+ self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertTrue("image" in server)
@@ -181,6 +184,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server(self):
"""Rebuild a server."""
+ self.flags(stub_network=True)
# create a server which initially has no metadata
server = self._build_minimal_create_server_request()
@@ -190,6 +194,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
@@ -212,6 +218,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server_with_metadata(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
# create a server which initially has no metadata
server = self._build_minimal_create_server_request()
@@ -221,6 +228,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
@@ -248,6 +257,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
def test_create_and_rebuild_server_with_metadata_removal(self):
"""Rebuild a server with metadata."""
+ self.flags(stub_network=True)
# create a server which initially has no metadata
server = self._build_minimal_create_server_request()
@@ -264,6 +274,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
+ created_server = self._wait_for_creation(created_server)
+
# rebuild the server with metadata
post = {}
post['rebuild'] = {
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 158df2a27..a52dd041a 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -40,6 +40,7 @@ from nova.scheduler import driver
from nova.scheduler import manager
from nova.scheduler import multi
from nova.compute import power_state
+from nova.compute import vm_states
FLAGS = flags.FLAGS
@@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase):
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 10)
inst['local_gb'] = kwargs.get('local_gb', 20)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
+ inst['task_state'] = kwargs.get('task_state', None)
return db.instance_create(ctxt, inst)
def test_fallback(self):
@@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase):
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['local_gb'] = kwargs.get('local_gb', 30)
inst['launched_on'] = kwargs.get('launched_on', 'dummy')
- inst['state_description'] = kwargs.get('state_description', 'running')
- inst['state'] = kwargs.get('state', power_state.RUNNING)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['task_state'] = kwargs.get('task_state', None)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase):
block_migration=False)
i_ref = db.instance_get(self.context, instance_id)
- self.assertTrue(i_ref['state_description'] == 'migrating')
+ self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATING)
db.instance_destroy(self.context, instance_id)
db.volume_destroy(self.context, v_ref['id'])
def test_live_migration_src_check_instance_not_running(self):
"""The instance given by instance_id is not running."""
- instance_id = self._create_instance(state_description='migrating')
+ instance_id = self._create_instance(power_state=power_state.NOSTATE)
i_ref = db.instance_get(self.context, instance_id)
try:
diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py
index 06cc498ac..aaa633adc 100644
--- a/nova/tests/test_adminapi.py
+++ b/nova/tests/test_adminapi.py
@@ -38,8 +38,6 @@ class AdminApiTestCase(test.TestCase):
super(AdminApiTestCase, self).setUp()
self.flags(connection_type='fake')
- self.conn = rpc.create_connection()
-
# set up our cloud
self.api = admin.AdminController()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 0793784f8..d533a4794 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -38,6 +38,7 @@ from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
+from nova.compute import vm_states
from nova.image import fake
@@ -51,8 +52,6 @@ class CloudTestCase(test.TestCase):
self.flags(connection_type='fake',
stub_network=True)
- self.conn = rpc.create_connection()
-
# set up our cloud
self.cloud = cloud.CloudController()
@@ -1163,7 +1162,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
def _wait_for_state(self, ctxt, instance_id, predicate):
- """Wait for an stopping instance to be a given state"""
+ """Wait for a stopped instance to be a given state"""
id = ec2utils.ec2_id_to_id(instance_id)
while True:
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
@@ -1174,12 +1173,16 @@ class CloudTestCase(test.TestCase):
def _wait_for_running(self, instance_id):
def is_running(info):
- return info['state_description'] == 'running'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
+ return vm_state == vm_states.ACTIVE and task_state is None
self._wait_for_state(self.context, instance_id, is_running)
def _wait_for_stopped(self, instance_id):
def is_stopped(info):
- return info['state_description'] == 'stopped'
+ vm_state = info["vm_state"]
+ task_state = info["task_state"]
+ return vm_state == vm_states.STOPPED and task_state is None
self._wait_for_state(self.context, instance_id, is_stopped)
def _wait_for_terminate(self, instance_id):
@@ -1562,7 +1565,7 @@ class CloudTestCase(test.TestCase):
'id': 0,
'root_device_name': '/dev/sdh',
'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
- 'state_description': 'stopping',
+ 'vm_state': vm_states.STOPPED,
'instance_type': {'name': 'fake_type'},
'kernel_id': 1,
'ramdisk_id': 2,
@@ -1606,7 +1609,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
- 'instanceInitiatedShutdownBehavior': 'stop'})
+ 'instanceInitiatedShutdownBehavior': 'stopped'})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 6659b81eb..766a7da9b 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -24,6 +24,7 @@ from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
+from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
@@ -763,8 +764,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})
@@ -795,8 +796,8 @@ class ComputeTestCase(test.TestCase):
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
- dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
- 'state': power_state.RUNNING,
+ dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
'host': i_ref['host']})
self.compute.db = dbmock
@@ -841,8 +842,8 @@ class ComputeTestCase(test.TestCase):
c = context.get_admin_context()
instance_id = self._create_instance()
i_ref = db.instance_get(c, instance_id)
- db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
- 'state': power_state.PAUSED})
+ db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
'instance_id': instance_id})
@@ -903,7 +904,7 @@ class ComputeTestCase(test.TestCase):
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(power_state.SHUTOFF, instances[0]['state'])
+ self.assertEqual(power_state.NOSTATE, instances[0]['power_state'])
def test_get_all_by_name_regexp(self):
"""Test searching instances by name (display_name)"""
@@ -1323,25 +1324,28 @@ class ComputeTestCase(test.TestCase):
"""Test searching instances by state"""
c = context.get_admin_context()
- instance_id1 = self._create_instance({'state': power_state.SHUTDOWN})
+ instance_id1 = self._create_instance({
+ 'power_state': power_state.SHUTDOWN,
+ })
instance_id2 = self._create_instance({
- 'id': 2,
- 'state': power_state.RUNNING})
+ 'id': 2,
+ 'power_state': power_state.RUNNING,
+ })
instance_id3 = self._create_instance({
- 'id': 10,
- 'state': power_state.RUNNING})
-
+ 'id': 10,
+ 'power_state': power_state.RUNNING,
+ })
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SUSPENDED})
+ search_opts={'power_state': power_state.SUSPENDED})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.SHUTDOWN})
+ search_opts={'power_state': power_state.SHUTDOWN})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0].id, instance_id1)
instances = self.compute_api.get_all(c,
- search_opts={'state': power_state.RUNNING})
+ search_opts={'power_state': power_state.RUNNING})
self.assertEqual(len(instances), 2)
instance_ids = [instance.id for instance in instances]
self.assertTrue(instance_id2 in instance_ids)
@@ -1349,7 +1353,7 @@ class ComputeTestCase(test.TestCase):
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
- search_opts={'state': [power_state.SHUTDOWN,
+ search_opts={'power_state': [power_state.SHUTDOWN,
power_state.RUNNING]})
self.assertEqual(len(instances), 3)
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
new file mode 100644
index 000000000..b2507fa59
--- /dev/null
+++ b/nova/tests/test_context.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import test
+
+
+class ContextTestCase(test.TestCase):
+
+ def test_request_context_sets_is_admin(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['admin', 'weasel'])
+ self.assertEquals(ctxt.is_admin, True)
+
+ def test_request_context_sets_is_admin_upcase(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['Admin', 'weasel'])
+ self.assertEquals(ctxt.is_admin, True)
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 0b8539442..25ff940f0 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -371,6 +371,22 @@ class VlanNetworkTestCase(test.TestCase):
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
+ def test_cant_associate_associated_floating_ip(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake_floating_ip_get_by_address(context, address):
+ return {'address': '10.10.10.10',
+ 'fixed_ip': {'address': '10.0.0.1'}}
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
+ fake_floating_ip_get_by_address)
+
+ self.assertRaises(exception.FloatingIpAlreadyInUse,
+ self.network.associate_floating_ip,
+ ctxt,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
class CommonNetworkTestCase(test.TestCase):
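
Note: the new test_cant_associate_associated_floating_ip stubs
floating_ip_get_by_address to return an address that already carries a
fixed_ip and expects FloatingIpAlreadyInUse. A sketch of the guard this
implies on the manager side (the real check is in nova/network/manager.py;
the method signature and the exception keyword argument are assumptions):

    # Illustrative guard only; not the actual nova/network/manager.py code.
    def associate_floating_ip(self, context, floating_address, fixed_address):
        floating_ip = self.db.floating_ip_get_by_address(context,
                                                         floating_address)
        if floating_ip.get('fixed_ip'):
            # Already associated: refuse rather than silently re-associate.
            raise exception.FloatingIpAlreadyInUse(address=floating_address)
        # (the association itself is elided here)
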
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index ba9c0a859..6b4454747 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -22,168 +22,16 @@ Unit Tests for remote procedure calls using queue
from nova import context
from nova import log as logging
from nova import rpc
-from nova import test
+from nova.tests import test_rpc_common
LOG = logging.getLogger('nova.tests.rpc')
-class RpcTestCase(test.TestCase):
+class RpcTestCase(test_rpc_common._BaseRpcTestCase):
def setUp(self):
+ self.rpc = rpc
super(RpcTestCase, self).setUp()
- self.conn = rpc.create_connection(True)
- self.receiver = TestReceiver()
- self.consumer = rpc.create_consumer(self.conn,
- 'test',
- self.receiver,
- False)
- self.consumer.attach_to_eventlet()
- self.context = context.get_admin_context()
- def test_call_succeed(self):
- value = 42
- result = rpc.call(self.context, 'test', {"method": "echo",
- "args": {"value": value}})
- self.assertEqual(value, result)
-
- def test_call_succeed_despite_multiple_returns(self):
- value = 42
- result = rpc.call(self.context, 'test', {"method": "echo_three_times",
- "args": {"value": value}})
- self.assertEqual(value + 2, result)
-
- def test_call_succeed_despite_multiple_returns_yield(self):
- value = 42
- result = rpc.call(self.context, 'test',
- {"method": "echo_three_times_yield",
- "args": {"value": value}})
- self.assertEqual(value + 2, result)
-
- def test_multicall_succeed_once(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo",
- "args": {"value": value}})
- for i, x in enumerate(result):
- if i > 0:
- self.fail('should only receive one response')
- self.assertEqual(value + i, x)
-
- def test_multicall_succeed_three_times(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo_three_times",
- "args": {"value": value}})
- for i, x in enumerate(result):
- self.assertEqual(value + i, x)
-
- def test_multicall_succeed_three_times_yield(self):
- value = 42
- result = rpc.multicall(self.context,
- 'test',
- {"method": "echo_three_times_yield",
- "args": {"value": value}})
- for i, x in enumerate(result):
- self.assertEqual(value + i, x)
-
- def test_context_passed(self):
- """Makes sure a context is passed through rpc call."""
- value = 42
- result = rpc.call(self.context,
- 'test', {"method": "context",
- "args": {"value": value}})
- self.assertEqual(self.context.to_dict(), result)
-
- def test_call_exception(self):
- """Test that exception gets passed back properly.
-
- rpc.call returns a RemoteError object. The value of the
- exception is converted to a string, so we convert it back
- to an int in the test.
-
- """
- value = 42
- self.assertRaises(rpc.RemoteError,
- rpc.call,
- self.context,
- 'test',
- {"method": "fail",
- "args": {"value": value}})
- try:
- rpc.call(self.context,
- 'test',
- {"method": "fail",
- "args": {"value": value}})
- self.fail("should have thrown rpc.RemoteError")
- except rpc.RemoteError as exc:
- self.assertEqual(int(exc.value), value)
-
- def test_nested_calls(self):
- """Test that we can do an rpc.call inside another call."""
- class Nested(object):
- @staticmethod
- def echo(context, queue, value):
- """Calls echo in the passed queue"""
- LOG.debug(_("Nested received %(queue)s, %(value)s")
- % locals())
- # TODO: so, it will replay the context and use the same REQID?
- # that's bizarre.
- ret = rpc.call(context,
- queue,
- {"method": "echo",
- "args": {"value": value}})
- LOG.debug(_("Nested return %s"), ret)
- return value
-
- nested = Nested()
- conn = rpc.create_connection(True)
- consumer = rpc.create_consumer(conn,
- 'nested',
- nested,
- False)
- consumer.attach_to_eventlet()
- value = 42
- result = rpc.call(self.context,
- 'nested', {"method": "echo",
- "args": {"queue": "test",
- "value": value}})
- self.assertEqual(value, result)
-
-
-class TestReceiver(object):
- """Simple Proxy class so the consumer has methods to call.
-
- Uses static methods because we aren't actually storing any state.
-
- """
-
- @staticmethod
- def echo(context, value):
- """Simply returns whatever value is sent in."""
- LOG.debug(_("Received %s"), value)
- return value
-
- @staticmethod
- def context(context, value):
- """Returns dictionary version of context."""
- LOG.debug(_("Received %s"), context)
- return context.to_dict()
-
- @staticmethod
- def echo_three_times(context, value):
- context.reply(value)
- context.reply(value + 1)
- context.reply(value + 2)
-
- @staticmethod
- def echo_three_times_yield(context, value):
- yield value
- yield value + 1
- yield value + 2
-
- @staticmethod
- def fail(context, value):
- """Raises an exception with the value sent in."""
- raise Exception(value)
+ def tearDown(self):
+ super(RpcTestCase, self).tearDown()
diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py
deleted file mode 100644
index 2215a908b..000000000
--- a/nova/tests/test_rpc_amqp.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 Openstack, LLC.
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests For RPC AMQP.
-"""
-
-from nova import context
-from nova import log as logging
-from nova import rpc
-from nova.rpc import amqp
-from nova import test
-
-
-LOG = logging.getLogger('nova.tests.rpc')
-
-
-class RpcAMQPTestCase(test.TestCase):
- def setUp(self):
- super(RpcAMQPTestCase, self).setUp()
- self.conn = rpc.create_connection(True)
- self.receiver = TestReceiver()
- self.consumer = rpc.create_consumer(self.conn,
- 'test',
- self.receiver,
- False)
- self.consumer.attach_to_eventlet()
- self.context = context.get_admin_context()
-
- def test_connectionpool_single(self):
- """Test that ConnectionPool recycles a single connection."""
- conn1 = amqp.ConnectionPool.get()
- amqp.ConnectionPool.put(conn1)
- conn2 = amqp.ConnectionPool.get()
- amqp.ConnectionPool.put(conn2)
- self.assertEqual(conn1, conn2)
-
-
-class TestReceiver(object):
- """Simple Proxy class so the consumer has methods to call.
-
- Uses static methods because we aren't actually storing any state.
-
- """
-
- @staticmethod
- def echo(context, value):
- """Simply returns whatever value is sent in."""
- LOG.debug(_("Received %s"), value)
- return value
-
- @staticmethod
- def context(context, value):
- """Returns dictionary version of context."""
- LOG.debug(_("Received %s"), context)
- return context.to_dict()
-
- @staticmethod
- def echo_three_times(context, value):
- context.reply(value)
- context.reply(value + 1)
- context.reply(value + 2)
-
- @staticmethod
- def echo_three_times_yield(context, value):
- yield value
- yield value + 1
- yield value + 2
-
- @staticmethod
- def fail(context, value):
- """Raises an exception with the value sent in."""
- raise Exception(value)
diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py
new file mode 100644
index 000000000..57cdebf4f
--- /dev/null
+++ b/nova/tests/test_rpc_carrot.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using carrot
+"""
+
+from nova import context
+from nova import log as logging
+from nova.rpc import impl_carrot
+from nova.tests import test_rpc_common
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class RpcCarrotTestCase(test_rpc_common._BaseRpcTestCase):
+ def setUp(self):
+ self.rpc = impl_carrot
+ super(RpcCarrotTestCase, self).setUp()
+
+ def tearDown(self):
+ super(RpcCarrotTestCase, self).tearDown()
+
+ def test_connectionpool_single(self):
+ """Test that ConnectionPool recycles a single connection."""
+ conn1 = self.rpc.ConnectionPool.get()
+ self.rpc.ConnectionPool.put(conn1)
+ conn2 = self.rpc.ConnectionPool.get()
+ self.rpc.ConnectionPool.put(conn2)
+ self.assertEqual(conn1, conn2)
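
Note: test_connectionpool_single passes only if the pool hands back the
connection that was just returned to it. A minimal, dependency-free sketch of
that recycle behaviour (illustrative; the real ConnectionPool in
nova/rpc/impl_carrot.py is eventlet-based):

    # Illustrative stand-in for the recycle behaviour the test verifies.
    class SimpleConnectionPool(object):
        def __init__(self, create_fn):
            self._create = create_fn
            self._free = []

        def get(self):
            # Prefer an idle connection over creating a new one.
            if self._free:
                return self._free.pop()
            return self._create()

        def put(self, conn):
            self._free.append(conn)

After put(conn1), the next get() returns conn1 again, which is exactly the
equality the test asserts.
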
diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py
new file mode 100644
index 000000000..4ab4e8a0e
--- /dev/null
+++ b/nova/tests/test_rpc_common.py
@@ -0,0 +1,189 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls shared between all implementations
+"""
+
+from nova import context
+from nova import log as logging
+from nova.rpc.common import RemoteError
+from nova import test
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class _BaseRpcTestCase(test.TestCase):
+ def setUp(self):
+ super(_BaseRpcTestCase, self).setUp()
+ self.conn = self.rpc.create_connection(True)
+ self.receiver = TestReceiver()
+ self.conn.create_consumer('test', self.receiver, False)
+ self.conn.consume_in_thread()
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.conn.close()
+ super(_BaseRpcTestCase, self).tearDown()
+
+ def test_call_succeed(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test', {"method": "echo",
+ "args": {"value": value}})
+ self.assertEqual(value, result)
+
+ def test_call_succeed_despite_multiple_returns(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test',
+ {"method": "echo_three_times",
+ "args": {"value": value}})
+ self.assertEqual(value + 2, result)
+
+ def test_call_succeed_despite_multiple_returns_yield(self):
+ value = 42
+ result = self.rpc.call(self.context, 'test',
+ {"method": "echo_three_times_yield",
+ "args": {"value": value}})
+ self.assertEqual(value + 2, result)
+
+ def test_multicall_succeed_once(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ if i > 0:
+ self.fail('should only receive one response')
+ self.assertEqual(value + i, x)
+
+ def test_multicall_succeed_three_times(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo_three_times",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ self.assertEqual(value + i, x)
+
+ def test_multicall_succeed_three_times_yield(self):
+ value = 42
+ result = self.rpc.multicall(self.context,
+ 'test',
+ {"method": "echo_three_times_yield",
+ "args": {"value": value}})
+ for i, x in enumerate(result):
+ self.assertEqual(value + i, x)
+
+ def test_context_passed(self):
+ """Makes sure a context is passed through rpc call."""
+ value = 42
+ result = self.rpc.call(self.context,
+ 'test', {"method": "context",
+ "args": {"value": value}})
+ self.assertEqual(self.context.to_dict(), result)
+
+ def test_call_exception(self):
+ """Test that exception gets passed back properly.
+
+ rpc.call returns a RemoteError object. The value of the
+ exception is converted to a string, so we convert it back
+ to an int in the test.
+
+ """
+ value = 42
+ self.assertRaises(RemoteError,
+ self.rpc.call,
+ self.context,
+ 'test',
+ {"method": "fail",
+ "args": {"value": value}})
+ try:
+ self.rpc.call(self.context,
+ 'test',
+ {"method": "fail",
+ "args": {"value": value}})
+ self.fail("should have thrown RemoteError")
+ except RemoteError as exc:
+ self.assertEqual(int(exc.value), value)
+
+ def test_nested_calls(self):
+ """Test that we can do an rpc.call inside another call."""
+ class Nested(object):
+ @staticmethod
+ def echo(context, queue, value):
+ """Calls echo in the passed queue"""
+ LOG.debug(_("Nested received %(queue)s, %(value)s")
+ % locals())
+ # TODO: so, it will replay the context and use the same REQID?
+ # that's bizarre.
+ ret = self.rpc.call(context,
+ queue,
+ {"method": "echo",
+ "args": {"value": value}})
+ LOG.debug(_("Nested return %s"), ret)
+ return value
+
+ nested = Nested()
+ conn = self.rpc.create_connection(True)
+ conn.create_consumer('nested', nested, False)
+ conn.consume_in_thread()
+ value = 42
+ result = self.rpc.call(self.context,
+ 'nested', {"method": "echo",
+ "args": {"queue": "test",
+ "value": value}})
+ conn.close()
+ self.assertEqual(value, result)
+
+
+class TestReceiver(object):
+ """Simple Proxy class so the consumer has methods to call.
+
+ Uses static methods because we aren't actually storing any state.
+
+ """
+
+ @staticmethod
+ def echo(context, value):
+ """Simply returns whatever value is sent in."""
+ LOG.debug(_("Received %s"), value)
+ return value
+
+ @staticmethod
+ def context(context, value):
+ """Returns dictionary version of context."""
+ LOG.debug(_("Received %s"), context)
+ return context.to_dict()
+
+ @staticmethod
+ def echo_three_times(context, value):
+ context.reply(value)
+ context.reply(value + 1)
+ context.reply(value + 2)
+
+ @staticmethod
+ def echo_three_times_yield(context, value):
+ yield value
+ yield value + 1
+ yield value + 2
+
+ @staticmethod
+ def fail(context, value):
+ """Raises an exception with the value sent in."""
+ raise Exception(value)
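
Note: _BaseRpcTestCase is deliberately implementation-agnostic; it only
touches the surface shared by impl_carrot and impl_kombu. A sketch of that
implied surface, inferred from the calls made above (the method names come
from the tests; everything else is illustrative):

    # Interface implied by _BaseRpcTestCase.  An implementation module is
    # expected to expose call(), multicall() and create_connection(), and the
    # returned connection object must support the three methods used in setUp().
    class Connection(object):
        def create_consumer(self, topic, proxy, fanout=False):
            """Route messages on 'topic' to methods of 'proxy'."""
            raise NotImplementedError()

        def consume_in_thread(self):
            """Consume registered queues in a background (green)thread."""
            raise NotImplementedError()

        def close(self):
            """Tear down the underlying broker connection."""
            raise NotImplementedError()
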
diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py
new file mode 100644
index 000000000..101ed14af
--- /dev/null
+++ b/nova/tests/test_rpc_kombu.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for remote procedure calls using kombu
+"""
+
+from nova import context
+from nova import log as logging
+from nova import test
+from nova.rpc import impl_kombu
+from nova.tests import test_rpc_common
+
+
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class RpcKombuTestCase(test_rpc_common._BaseRpcTestCase):
+ def setUp(self):
+ self.rpc = impl_kombu
+ super(RpcKombuTestCase, self).setUp()
+
+ def tearDown(self):
+ super(RpcKombuTestCase, self).tearDown()
+
+ def test_reusing_connection(self):
+ """Test that reusing a connection returns same one."""
+ conn_context = self.rpc.create_connection(new=False)
+ conn1 = conn_context.connection
+ conn_context.close()
+ conn_context = self.rpc.create_connection(new=False)
+ conn2 = conn_context.connection
+ conn_context.close()
+ self.assertEqual(conn1, conn2)
+
+ def test_topic_send_receive(self):
+ """Test sending to a topic exchange/queue"""
+
+ conn = self.rpc.create_connection()
+ message = 'topic test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_topic_consumer('a_topic', _callback)
+ conn.topic_send('a_topic', message)
+ conn.consume(limit=1)
+ conn.close()
+
+ self.assertEqual(self.received_message, message)
+
+ def test_direct_send_receive(self):
+ """Test sending to a direct exchange/queue"""
+ conn = self.rpc.create_connection()
+ message = 'direct test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_direct_consumer('a_direct', _callback)
+ conn.direct_send('a_direct', message)
+ conn.consume(limit=1)
+ conn.close()
+
+ self.assertEqual(self.received_message, message)
+
+ @test.skip_test("kombu memory transport seems buggy with fanout queues "
+ "as this test passes when you use rabbit (fake_rabbit=False)")
+ def test_fanout_send_receive(self):
+ """Test sending to a fanout exchange and consuming from 2 queues"""
+
+ conn = self.rpc.create_connection()
+ conn2 = self.rpc.create_connection()
+ message = 'fanout test message'
+
+ self.received_message = None
+
+ def _callback(message):
+ self.received_message = message
+
+ conn.declare_fanout_consumer('a_fanout', _callback)
+ conn2.declare_fanout_consumer('a_fanout', _callback)
+ conn.fanout_send('a_fanout', message)
+
+ conn.consume(limit=1)
+ conn.close()
+ self.assertEqual(self.received_message, message)
+
+ self.received_message = None
+ conn2.consume(limit=1)
+ conn2.close()
+ self.assertEqual(self.received_message, message)
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
index 64f11fa45..3482ff6a0 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/test_test.py
@@ -40,6 +40,5 @@ class IsolationTestCase(test.TestCase):
connection = rpc.create_connection(new=True)
proxy = NeverCalled()
- consumer = rpc.create_consumer(connection, 'compute',
- proxy, fanout=False)
- consumer.attach_to_eventlet()
+ connection.create_consumer('compute', proxy, fanout=False)
+ connection.consume_in_thread()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 2f0559366..45dad3516 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -16,7 +16,6 @@
"""Test suite for XenAPI."""
-import eventlet
import functools
import json
import os
@@ -203,42 +202,6 @@ class XenAPIVMTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
- def test_parallel_builds(self):
- stubs.stubout_loopingcall_delay(self.stubs)
-
- def _do_build(id, proj, user, *args):
- values = {
- 'id': id,
- 'project_id': proj,
- 'user_id': user,
- 'image_ref': 1,
- 'kernel_id': 2,
- 'ramdisk_id': 3,
- 'instance_type_id': '3', # m1.large
- 'os_type': 'linux',
- 'architecture': 'x86-64'}
- network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
- {'broadcast': '192.168.0.255',
- 'dns': ['192.168.0.1'],
- 'gateway': '192.168.0.1',
- 'gateway6': 'dead:beef::1',
- 'ip6s': [{'enabled': '1',
- 'ip': 'dead:beef::dcad:beff:feef:0',
- 'netmask': '64'}],
- 'ips': [{'enabled': '1',
- 'ip': '192.168.0.100',
- 'netmask': '255.255.255.0'}],
- 'label': 'fake',
- 'mac': 'DE:AD:BE:EF:00:00',
- 'rxtx_cap': 3})]
- instance = db.instance_create(self.context, values)
- self.conn.spawn(self.context, instance, network_info)
-
- gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id)
- gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id)
- gt1.wait()
- gt2.wait()
-
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index afd672c7a..0d896239a 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -23,6 +23,8 @@ import time
from nova import db
from nova import utils
+from nova.compute import task_states
+from nova.compute import vm_states
def stub_out_db_instance_api(stubs):
@@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs):
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
- 'state_description': 'scheduling',
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
diff --git a/tools/pip-requires b/tools/pip-requires
index 60b502ffd..66d6a48d9 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -8,6 +8,7 @@ anyjson==0.2.4
boto==1.9b
carrot==0.10.5
eventlet
+kombu
lockfile==0.8
lxml==2.3
python-novaclient==2.6.0