-rw-r--r-- | nova/compute/api.py | 23
-rw-r--r-- | nova/compute/manager.py | 8
-rw-r--r-- | nova/compute/utils.py | 94
-rw-r--r-- | nova/db/api.py | 16
-rw-r--r-- | nova/db/sqlalchemy/api.py | 32
-rw-r--r-- | nova/notifications.py | 242
-rw-r--r-- | nova/scheduler/driver.py | 7
-rw-r--r-- | nova/scheduler/manager.py | 7
-rw-r--r-- | nova/tests/api/openstack/compute/contrib/test_disk_config.py | 8
-rw-r--r-- | nova/tests/api/openstack/compute/test_server_actions.py | 6
-rw-r--r-- | nova/tests/api/openstack/compute/test_servers.py | 15
-rw-r--r-- | nova/tests/scheduler/test_scheduler.py | 46
-rw-r--r-- | nova/tests/test_db_api.py | 12
-rw-r--r-- | nova/tests/test_notifications.py | 169
-rw-r--r-- | nova/virt/baremetal/proxy.py | 22
-rw-r--r-- | nova/virt/xenapi/host.py | 30
16 files changed, 602 insertions(+), 135 deletions(-)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index e83dfb3a6..3fd358a34 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -41,6 +41,7 @@ from nova import flags
 import nova.image
 from nova import log as logging
 from nova import network
+from nova import notifications
 from nova.openstack.common import cfg
 from nova.openstack.common import excutils
 from nova.openstack.common import jsonutils
@@ -599,6 +600,11 @@ class API(base.Base):
         instance_id = instance['id']
         instance_uuid = instance['uuid']

+        # send a state update notification for the initial create to
+        # show it going from non-existent to BUILDING
+        notifications.send_update_with_states(context, instance, None,
+                vm_states.BUILDING, None, None)
+
         for security_group_id in security_groups:
             self.db.instance_add_security_group(elevated,
                                                 instance_uuid,
@@ -919,8 +925,14 @@ class API(base.Base):
         :returns: None
         """
-        rv = self.db.instance_update(context, instance["id"], kwargs)
-        return dict(rv.iteritems())
+
+        # Update the instance record and send a state update notification
+        # if task or vm state changed
+        old_ref, instance_ref = self.db.instance_update_and_get_original(
+                context, instance["id"], kwargs)
+        notifications.send_update(context, old_ref, instance_ref)
+
+        return dict(instance_ref.iteritems())

     @wrap_check_policy
     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
@@ -1260,9 +1272,16 @@ class API(base.Base):
         else:
             raise Exception(_('Image type not recognized %s') % image_type)

+        # change instance state and notify
+        old_vm_state = instance["vm_state"]
+        old_task_state = instance["task_state"]
+
         self.db.instance_test_and_set(
                 context, instance_uuid, 'task_state', [None], task_state)

+        notifications.send_update_with_states(context, instance, old_vm_state,
+                instance["vm_state"], old_task_state, instance["task_state"])
+
         properties = {
             'instance_uuid': instance_uuid,
             'user_id': str(context.user_id),
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index bd3cd02cc..6a0251a15 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -61,6 +61,7 @@ from nova import log as logging
 from nova import manager
 from nova import network
 from nova.network import model as network_model
+from nova import notifications
 from nova.notifier import api as notifier
 from nova.openstack.common import cfg
 from nova.openstack.common import excutils
@@ -253,7 +254,12 @@ class ComputeManager(manager.SchedulerDependentManager):

     def _instance_update(self, context, instance_id, **kwargs):
         """Update an instance in the database using kwargs as value."""
-        return self.db.instance_update(context, instance_id, kwargs)
+
+        (old_ref, instance_ref) = self.db.instance_update_and_get_original(
+                context, instance_id, kwargs)
+        notifications.send_update(context, old_ref, instance_ref)
+
+        return instance_ref

     def _set_instance_error_state(self, context, instance_uuid):
         try:
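The hunks above establish the pattern the rest of this commit repeats: fetch the pre-update record, apply the update, then hand both refs to the notifier. A minimal sketch of the flow from a caller's point of view, using the db and notifications APIs this commit introduces (the set_task_state wrapper itself is hypothetical, not part of the commit):

    # Sketch only: set_task_state is illustrative.
    def set_task_state(context, instance_id, task_state):
        # returns (old_ref, new_ref); old_ref is a shallow copy taken
        # before the UPDATE is applied
        (old_ref, new_ref) = db.instance_update_and_get_original(
                context, instance_id, {'task_state': task_state})
        # fires compute.instance.update only if vm/task state changed
        # and FLAGS.notify_on_state_change permits it
        notifications.send_update(context, old_ref, new_ref)
        return new_ref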
""" - admin_context = nova.context.get_admin_context(read_deleted='yes') - begin, end = utils.last_completed_audit_period() - bw = {} - if current_period: - audit_start = end - audit_end = utils.utcnow() - else: - audit_start = begin - audit_end = end - - if (instance_ref.get('info_cache') and - instance_ref['info_cache'].get('network_info')): - - cached_info = instance_ref['info_cache']['network_info'] - nw_info = network_model.NetworkInfo.hydrate(cached_info) - else: - try: - nw_info = network.API().get_instance_nw_info(admin_context, - instance_ref) - except Exception: - LOG.exception('Failed to get nw_info', instance=instance_ref) - if ignore_missing_network_data: - return - raise - - macs = [vif['address'] for vif in nw_info] - uuids = [instance_ref.uuid] - - bw_usages = db.bw_usage_get_by_uuids(admin_context, uuids, audit_start) - bw_usages = [b for b in bw_usages if b.mac in macs] - - for b in bw_usages: - label = 'net-name-not-found-%s' % b['mac'] - for vif in nw_info: - if vif['address'] == b['mac']: - label = vif['network']['label'] - break + audit_start, audit_end = notifications.audit_period_bounds(current_period) - bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out) + bw = notifications.bandwidth_usage(instance_ref, audit_start, + ignore_missing_network_data) if system_metadata is None: try: @@ -100,10 +66,7 @@ def notify_usage_exists(context, instance_ref, current_period=False, system_metadata = {} # add image metadata to the notification: - image_meta = {} - for md_key, md_value in system_metadata.iteritems(): - if md_key.startswith('image_'): - image_meta[md_key[6:]] = md_value + image_meta = notifications.image_meta(system_metadata) extra_info = dict(audit_period_beginning=str(audit_start), audit_period_ending=str(audit_end), @@ -231,49 +194,6 @@ def legacy_network_info(network_model): return network_info -def _usage_from_instance(context, instance_ref, network_info, - system_metadata, **kw): - """ - Get usage information for an instance which is common to all - notifications. - - :param network_info: network_info provided if not None - :param system_metadata: system_metadata DB entries for the instance, - if not None. *NOTE*: Currently unused here in trunk, but needed for - potential custom modifications. - """ - - def null_safe_str(s): - return str(s) if s else '' - - image_ref_url = utils.generate_image_url(instance_ref['image_ref']) - - usage_info = dict( - tenant_id=instance_ref['project_id'], - user_id=instance_ref['user_id'], - instance_id=instance_ref['uuid'], - instance_type=instance_ref['instance_type']['name'], - instance_type_id=instance_ref['instance_type_id'], - memory_mb=instance_ref['memory_mb'], - disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'], - display_name=instance_ref['display_name'], - created_at=str(instance_ref['created_at']), - # Nova's deleted vs terminated instance terminology is confusing, - # this should be when the instance was deleted (i.e. terminated_at), - # not when the db record was deleted. 
diff --git a/nova/db/api.py b/nova/db/api.py
index ff72aff37..c43a48c19 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -618,6 +618,22 @@ def instance_update(context, instance_id, values):
     return IMPL.instance_update(context, instance_id, values)


+def instance_update_and_get_original(context, instance_id, values):
+    """Set the given properties on an instance and update it. Return
+    a shallow copy of the original instance reference, as well as the
+    updated one.
+
+    :param context: = request context object
+    :param instance_id: = instance id or uuid
+    :param values: = dict containing column values
+
+    :returns: a tuple of the form (old_instance_ref, new_instance_ref)
+
+    Raises NotFound if instance does not exist.
+    """
+    return IMPL.instance_update_and_get_original(context, instance_id, values)
+
+
 def instance_add_security_group(context, instance_id, security_group_id):
     """Associate the given security group with the given instance."""
     return IMPL.instance_add_security_group(context, instance_id,
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 88ddeac34..5386b0f41 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -19,6 +19,7 @@

 """Implementation of SQLAlchemy backend."""

+import copy
 import datetime
 import functools
 import re
@@ -1640,6 +1641,30 @@ def instance_test_and_set(context, instance_id, attr, ok_states,

 @require_context
 def instance_update(context, instance_id, values):
+
+    instance_ref = _instance_update(context, instance_id, values)[1]
+    return instance_ref
+
+
+@require_context
+def instance_update_and_get_original(context, instance_id, values):
+    """Set the given properties on an instance and update it. Return
+    a shallow copy of the original instance reference, as well as the
+    updated one.
+
+    :param context: = request context object
+    :param instance_id: = instance id or uuid
+    :param values: = dict containing column values
+
+    :returns: a tuple of the form (old_instance_ref, new_instance_ref)
+
+    Raises NotFound if instance does not exist.
+    """
+    return _instance_update(context, instance_id, values,
+                            copy_old_instance=True)
+
+
+def _instance_update(context, instance_id, values, copy_old_instance=False):
     session = get_session()

     if utils.is_uuid_like(instance_id):
@@ -1648,6 +1673,11 @@ def instance_update(context, instance_id, values):
     else:
         instance_ref = instance_get(context, instance_id, session=session)

+    if copy_old_instance:
+        old_instance_ref = copy.copy(instance_ref)
+    else:
+        old_instance_ref = None
+
     metadata = values.get('metadata')
     if metadata is not None:
         instance_metadata_update(
@@ -1663,7 +1693,7 @@ def instance_update(context, instance_id, values):
     instance_ref.update(values)
     instance_ref.save(session=session)

-    return instance_ref
+    return (old_instance_ref, instance_ref)


 def instance_add_security_group(context, instance_uuid, security_group_id):
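One caveat worth noting: the "original" ref is produced with copy.copy, a shallow copy, so nested attributes (joined rows, metadata lists) stay shared between the old and new refs; only top-level column values such as vm_state and task_state are reliably preserved. A plain-dict illustration of the contract (dicts stand in for SQLAlchemy model objects):

    import copy

    record = {'vm_state': 'building', 'task_state': None}
    old_ref = copy.copy(record)      # what copy_old_instance=True does
    record['vm_state'] = 'active'    # the UPDATE
    assert old_ref['vm_state'] == 'building'
    assert record['vm_state'] == 'active'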
+ """ + return _instance_update(context, instance_id, values, + copy_old_instance=True) + + +def _instance_update(context, instance_id, values, copy_old_instance=False): session = get_session() if utils.is_uuid_like(instance_id): @@ -1648,6 +1673,11 @@ def instance_update(context, instance_id, values): else: instance_ref = instance_get(context, instance_id, session=session) + if copy_old_instance: + old_instance_ref = copy.copy(instance_ref) + else: + old_instance_ref = None + metadata = values.get('metadata') if metadata is not None: instance_metadata_update( @@ -1663,7 +1693,7 @@ def instance_update(context, instance_id, values): instance_ref.update(values) instance_ref.save(session=session) - return instance_ref + return (old_instance_ref, instance_ref) def instance_add_security_group(context, instance_uuid, security_group_id): diff --git a/nova/notifications.py b/nova/notifications.py new file mode 100644 index 000000000..4e3d1362d --- /dev/null +++ b/nova/notifications.py @@ -0,0 +1,242 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Functionality related to notifications common to multiple layers of +the system. +""" + +import nova.context +from nova import db +from nova import exception +from nova import flags +from nova import log +from nova import network +from nova.network import model as network_model +from nova.notifier import api as notifier_api +from nova.openstack.common import cfg +from nova import utils + +LOG = log.getLogger(__name__) + +notify_state_opt = cfg.StrOpt('notify_on_state_change', default=None, + help='If set, send compute.instance.update notifications on instance ' + 'state changes. 
+def _send_instance_update_notification(context, instance, old_vm_state,
+        old_task_state, new_vm_state, new_task_state, host=None):
+    """Send 'compute.instance.update' notification to inform observers
+    about instance state changes"""
+
+    payload = usage_from_instance(context, instance, None, None)
+
+    states_payload = {
+        "old_state": old_vm_state,
+        "state": new_vm_state,
+        "old_task_state": old_task_state,
+        "new_task_state": new_task_state,
+    }
+
+    payload.update(states_payload)
+
+    # add audit fields:
+    (audit_start, audit_end) = audit_period_bounds(current_period=True)
+    payload["audit_period_beginning"] = audit_start
+    payload["audit_period_ending"] = audit_end
+
+    # add bw usage info:
+    bw = bandwidth_usage(instance, audit_start)
+    payload["bandwidth"] = bw
+
+    try:
+        system_metadata = db.instance_system_metadata_get(
+                context, instance.uuid)
+    except exception.NotFound:
+        system_metadata = {}
+
+    # add image metadata
+    image_meta_props = image_meta(system_metadata)
+    payload["image_meta"] = image_meta_props
+
+    if not host:
+        host = FLAGS.host
+
+    notifier_api.notify(context, host, 'compute.instance.update',
+            notifier_api.INFO, payload)
+
+
+def audit_period_bounds(current_period=False):
+    """Get the start and end of the relevant audit usage period
+
+    :param current_period: if True, this will generate a usage for the
+        current usage period; if False, this will generate a usage for the
+        previous audit period.
+    """
+
+    begin, end = utils.last_completed_audit_period()
+    if current_period:
+        audit_start = end
+        audit_end = utils.utcnow()
+    else:
+        audit_start = begin
+        audit_end = end
+
+    return (audit_start, audit_end)
+
+
+def bandwidth_usage(instance_ref, audit_start,
+        ignore_missing_network_data=True):
+    """Get bandwidth usage information for the instance for the
+    specified audit period.
+    """
+
+    admin_context = nova.context.get_admin_context(read_deleted='yes')
+
+    if (instance_ref.get('info_cache') and
+            instance_ref['info_cache'].get('network_info')):
+
+        cached_info = instance_ref['info_cache']['network_info']
+        nw_info = network_model.NetworkInfo.hydrate(cached_info)
+    else:
+        try:
+            nw_info = network.API().get_instance_nw_info(admin_context,
+                    instance_ref)
+        except Exception:
+            LOG.exception('Failed to get nw_info', instance=instance_ref)
+            if ignore_missing_network_data:
+                return
+            raise
+
+    macs = [vif['address'] for vif in nw_info]
+    uuids = [instance_ref["uuid"]]
+
+    bw_usages = db.bw_usage_get_by_uuids(admin_context, uuids, audit_start)
+    bw_usages = [b for b in bw_usages if b.mac in macs]
+
+    bw = {}
+
+    for b in bw_usages:
+        label = 'net-name-not-found-%s' % b['mac']
+        for vif in nw_info:
+            if vif['address'] == b['mac']:
+                label = vif['network']['label']
+                break
+
+        bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
+
+    return bw
+
+
+def image_meta(system_metadata):
+    """Format image metadata for use in notifications from the instance
+    system metadata.
+    """
+    image_meta = {}
+    for md_key, md_value in system_metadata.iteritems():
+        if md_key.startswith('image_'):
+            image_meta[md_key[6:]] = md_value
+
+    return image_meta
+
+
+def usage_from_instance(context, instance_ref, network_info,
+        system_metadata, **kw):
+    """Get usage information for an instance which is common to all
+    notifications.
+
+    :param network_info: network_info provided if not None
+    :param system_metadata: system_metadata DB entries for the instance,
+        if not None.  *NOTE*: Currently unused here in trunk, but needed for
+        potential custom modifications.
+    """
+
+    def null_safe_str(s):
+        return str(s) if s else ''
+
+    image_ref_url = utils.generate_image_url(instance_ref['image_ref'])
+
+    instance_type_name = instance_ref.get('instance_type', {}).get('name', '')
+
+    usage_info = dict(
+        tenant_id=instance_ref['project_id'],
+        user_id=instance_ref['user_id'],
+        instance_id=instance_ref['uuid'],
+        instance_type=instance_type_name,
+        instance_type_id=instance_ref['instance_type_id'],
+        memory_mb=instance_ref['memory_mb'],
+        disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
+        display_name=instance_ref['display_name'],
+        created_at=str(instance_ref['created_at']),
+        # Nova's deleted vs terminated instance terminology is confusing,
+        # this should be when the instance was deleted (i.e. terminated_at),
+        # not when the db record was deleted. (mdragon)
+        deleted_at=null_safe_str(instance_ref.get('terminated_at')),
+        launched_at=null_safe_str(instance_ref.get('launched_at')),
+        image_ref_url=image_ref_url,
+        state=instance_ref['vm_state'],
+        state_description=null_safe_str(instance_ref.get('task_state')))
+
+    if network_info is not None:
+        usage_info['fixed_ips'] = network_info.fixed_ips()
+
+    usage_info.update(kw)
+    return usage_info
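The decision logic in send_update_with_states distills to a small predicate; this standalone restatement (not part of the commit) makes the three notify_on_state_change settings easy to reason about:

    def should_fire(setting, old_vm, new_vm, old_task, new_task):
        # mirrors send_update_with_states above
        if not setting:
            return False          # notifications disabled
        if old_vm != new_vm:
            return True           # vm state changes always notify
        return (setting.lower() == "vm_and_task_state" and
                old_task != new_task)

    assert should_fire("vm_state", "building", "active", None, None)
    assert not should_fire("vm_state", "active", "active", None, "spawning")
    assert should_fire("vm_and_task_state", "active", "active",
                       None, "spawning")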
+ """ + + admin_context = nova.context.get_admin_context(read_deleted='yes') + + if (instance_ref.get('info_cache') and + instance_ref['info_cache'].get('network_info')): + + cached_info = instance_ref['info_cache']['network_info'] + nw_info = network_model.NetworkInfo.hydrate(cached_info) + else: + try: + nw_info = network.API().get_instance_nw_info(admin_context, + instance_ref) + except Exception: + LOG.exception('Failed to get nw_info', instance=instance_ref) + if ignore_missing_network_data: + return + raise + + macs = [vif['address'] for vif in nw_info] + uuids = [instance_ref["uuid"]] + + bw_usages = db.bw_usage_get_by_uuids(admin_context, uuids, audit_start) + bw_usages = [b for b in bw_usages if b.mac in macs] + + bw = {} + + for b in bw_usages: + label = 'net-name-not-found-%s' % b['mac'] + for vif in nw_info: + if vif['address'] == b['mac']: + label = vif['network']['label'] + break + + bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out) + + return bw + + +def image_meta(system_metadata): + """Format image metadata for use in notifications from the instance + system metadata. + """ + image_meta = {} + for md_key, md_value in system_metadata.iteritems(): + if md_key.startswith('image_'): + image_meta[md_key[6:]] = md_value + + return image_meta + + +def usage_from_instance(context, instance_ref, network_info, + system_metadata, **kw): + """Get usage information for an instance which is common to all + notifications. + + :param network_info: network_info provided if not None + :param system_metadata: system_metadata DB entries for the instance, + if not None. *NOTE*: Currently unused here in trunk, but needed for + potential custom modifications. + """ + + def null_safe_str(s): + return str(s) if s else '' + + image_ref_url = utils.generate_image_url(instance_ref['image_ref']) + + instance_type_name = instance_ref.get('instance_type', {}).get('name', '') + + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['uuid'], + instance_type=instance_type_name, + instance_type_id=instance_ref['instance_type_id'], + memory_mb=instance_ref['memory_mb'], + disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + # Nova's deleted vs terminated instance terminology is confusing, + # this should be when the instance was deleted (i.e. terminated_at), + # not when the db record was deleted. (mdragon) + deleted_at=null_safe_str(instance_ref.get('terminated_at')), + launched_at=null_safe_str(instance_ref.get('launched_at')), + image_ref_url=image_ref_url, + state=instance_ref['vm_state'], + state_description=null_safe_str(instance_ref.get('task_state'))) + + if network_info is not None: + usage_info['fixed_ips'] = network_info.fixed_ips() + + usage_info.update(kw) + return usage_info diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 82bba0ce4..8e49e5aa4 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -28,6 +28,7 @@ from nova import db from nova import exception from nova import flags from nova import log as logging +from nova import notifications from nova.openstack.common import cfg from nova.openstack.common import importutils from nova.openstack.common import jsonutils @@ -226,7 +227,11 @@ class Scheduler(object): # Changing instance_state. 
values = {"vm_state": vm_states.MIGRATING} - db.instance_update(context, instance_id, values) + + # update instance state and notify + (old_ref, new_instance_ref) = db.instance_update_and_get_original( + context, instance_id, values) + notifications.send_update(context, old_ref, new_instance_ref) src = instance_ref['host'] cast_to_compute_host(context, src, 'live_migration', diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index a2cef11a8..470368b03 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -29,6 +29,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import manager +from nova import notifications from nova.notifier import api as notifier from nova.openstack.common import cfg from nova.openstack.common import excutils @@ -173,7 +174,11 @@ class SchedulerManager(manager.Manager): state = vm_state.upper() LOG.warning(_('Setting instance to %(state)s state.'), locals(), instance_uuid=instance_uuid) - db.instance_update(context, instance_uuid, updates) + + # update instance state and notify on the transition + (old_ref, new_ref) = db.instance_update_and_get_original(context, + instance_uuid, updates) + notifications.send_update(context, old_ref, new_ref) payload = dict(request_spec=request_spec, instance_properties=properties, diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py index 59ae14ac8..41a1cd607 100644 --- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py +++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py @@ -92,13 +92,13 @@ class DiskConfigTestCase(test.TestCase): inst['updated_at'] = datetime.datetime(2010, 10, 10, 12, 0, 0) inst['progress'] = 0 inst['name'] = 'instance-1' # this is a property + inst['task_state'] = '' + inst['vm_state'] = '' def fake_instance_get_for_create(context, id_, *args, **kwargs): - return inst + return (inst, inst) - self.stubs.Set(nova.db, 'instance_get', - fake_instance_get_for_create) - self.stubs.Set(nova.db, 'instance_update', + self.stubs.Set(nova.db, 'instance_update_and_get_original', fake_instance_get_for_create) def fake_instance_get_all_for_create(context, *args, **kwargs): diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 774700361..04aef91f9 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -38,7 +38,8 @@ def return_server_not_found(context, uuid): def instance_update(context, instance_id, kwargs): - return fakes.stub_instance(instance_id) + inst = fakes.stub_instance(instance_id) + return (inst, inst) class MockSetAdminPassword(object): @@ -59,7 +60,8 @@ class ServerActionsControllerTest(test.TestCase): self.stubs.Set(nova.db, 'instance_get_by_uuid', fakes.fake_instance_get(vm_state=vm_states.ACTIVE, host='fake_host')) - self.stubs.Set(nova.db, 'instance_update', instance_update) + self.stubs.Set(nova.db, 'instance_update_and_get_original', + instance_update) fakes.stub_out_glance(self.stubs) fakes.stub_out_nw_api(self.stubs) diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 14d1dff75..aefe19581 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -72,7 +72,8 @@ def return_security_group(context, instance_id, security_group_id): def 
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 23b553d64..b7915ed8e 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -28,6 +28,7 @@ from nova import context
 from nova import db
 from nova import exception
 from nova import flags
+from nova import notifications
 from nova import rpc
 from nova.rpc import common as rpc_common
 from nova.scheduler import driver
@@ -232,9 +233,10 @@ class SchedulerManagerTestCase(test.TestCase):
         """
         fake_instance_uuid = 'fake-instance-id'
+        inst = {"vm_state": "", "task_state": ""}

         self._mox_schedule_method_helper('schedule_run_instance')
-        self.mox.StubOutWithMock(db, 'instance_update')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

         request_spec = {'instance_properties':
                 {'uuid': fake_instance_uuid}}
@@ -243,8 +245,8 @@ class SchedulerManagerTestCase(test.TestCase):
         self.manager.driver.schedule_run_instance(self.context,
                 *self.fake_args, **self.fake_kwargs).AndRaise(
                         exception.NoValidHost(reason=""))
-        db.instance_update(self.context, fake_instance_uuid,
-                {'vm_state': vm_states.ERROR})
+        db.instance_update_and_get_original(self.context, fake_instance_uuid,
+                {"vm_state": vm_states.ERROR}).AndReturn((inst, inst))

         self.mox.ReplayAll()
         self.manager.run_instance(self.context, self.topic,
@@ -255,10 +257,11 @@ class SchedulerManagerTestCase(test.TestCase):
         the instance in ACTIVE state
         """
         fake_instance_uuid = 'fake-instance-id'
+        inst = {"vm_state": "", "task_state": ""}

         self._mox_schedule_method_helper('schedule_prep_resize')

-        self.mox.StubOutWithMock(db, 'instance_update')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

         request_spec = {'instance_properties':
                 {'uuid': fake_instance_uuid}}
@@ -267,9 +270,9 @@ class SchedulerManagerTestCase(test.TestCase):
         self.manager.driver.schedule_prep_resize(self.context,
                 *self.fake_args, **self.fake_kwargs).AndRaise(
                         exception.NoValidHost(reason=""))
-        db.instance_update(self.context, fake_instance_uuid,
-                {'vm_state': vm_states.ACTIVE,
-                 'task_state': None})
+        db.instance_update_and_get_original(self.context, fake_instance_uuid,
+                {"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
+                        (inst, inst))

         self.mox.ReplayAll()
         self.manager.prep_resize(self.context, self.topic,
@@ -283,7 +286,7 @@ class SchedulerManagerTestCase(test.TestCase):

         self._mox_schedule_method_helper('schedule_prep_resize')

-        self.mox.StubOutWithMock(db, 'instance_update')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

         request_spec = {'instance_properties':
                 {'uuid': fake_instance_uuid}}
@@ -292,8 +295,13 @@ class SchedulerManagerTestCase(test.TestCase):
         self.manager.driver.schedule_prep_resize(self.context,
                 *self.fake_args, **self.fake_kwargs).AndRaise(
                         self.AnException('something happened'))
-        db.instance_update(self.context, fake_instance_uuid,
-                {'vm_state': vm_states.ERROR})
+
+        inst = {
+            "vm_state": "",
+            "task_state": "",
+        }
+        db.instance_update_and_get_original(self.context, fake_instance_uuid,
+                {"vm_state": vm_states.ERROR}).AndReturn((inst, inst))

         self.mox.ReplayAll()

@@ -421,7 +429,9 @@ class SchedulerTestCase(test.TestCase):
                 'power_state': power_state.RUNNING,
                 'memory_mb': 1024,
                 'root_gb': 1024,
-                'ephemeral_gb': 0}
+                'ephemeral_gb': 0,
+                'vm_state': '',
+                'task_state': ''}

     def test_live_migration_basic(self):
         """Test basic schedule_live_migration functionality"""
@@ -429,7 +439,7 @@ class SchedulerTestCase(test.TestCase):
         self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
         self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
         self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
-        self.mox.StubOutWithMock(db, 'instance_update')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
         self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

         dest = 'fake_host2'
@@ -443,8 +453,9 @@ class SchedulerTestCase(test.TestCase):
                 dest, block_migration, disk_over_commit)
         self.driver._live_migration_common_check(self.context, instance,
                 dest, block_migration, disk_over_commit)
-        db.instance_update(self.context, instance['id'],
-                {'vm_state': vm_states.MIGRATING})
+        db.instance_update_and_get_original(self.context, instance['id'],
+                {"vm_state": vm_states.MIGRATING}).AndReturn(
+                        (instance, instance))

         driver.cast_to_compute_host(self.context, instance['host'],
                 'live_migration', update_db=False,
@@ -468,7 +479,7 @@ class SchedulerTestCase(test.TestCase):
         self.mox.StubOutWithMock(db, 'queue_get_for')
         self.mox.StubOutWithMock(rpc, 'call')
         self.mox.StubOutWithMock(rpc, 'cast')
-        self.mox.StubOutWithMock(db, 'instance_update')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
         self.mox.StubOutWithMock(driver, 'cast_to_compute_host')

         dest = 'fake_host2'
@@ -530,8 +541,9 @@ class SchedulerTestCase(test.TestCase):
                 {'method': 'compare_cpu',
                  'args': {'cpu_info': 'fake_cpu_info'}}).AndReturn(True)
-        db.instance_update(self.context, instance['id'],
-                {'vm_state': vm_states.MIGRATING})
+        db.instance_update_and_get_original(self.context, instance['id'],
+                {"vm_state": vm_states.MIGRATING}).AndReturn(
+                        (instance, instance))

         driver.cast_to_compute_host(self.context, instance['host'],
                 'live_migration', update_db=False,
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 33bef98c2..b16ffd335 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -211,6 +211,18 @@ class DbApiTestCase(test.TestCase):
         system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
         self.assertEqual('baz', system_meta['original_image_ref'])

+    def test_instance_update_and_get_original(self):
+        ctxt = context.get_admin_context()
+
+        # Create an instance with an initial vm_state
+        values = {'vm_state': 'building'}
+        instance = db.instance_create(ctxt, values)
+
+        (old_ref, new_ref) = db.instance_update_and_get_original(ctxt,
+                instance['id'], {'vm_state': 'needscoffee'})
+        self.assertEquals("building", old_ref["vm_state"])
+        self.assertEquals("needscoffee", new_ref["vm_state"])
+
     def test_instance_fault_create(self):
         """Ensure we can create an instance fault"""
         ctxt = context.get_admin_context()
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
new file mode 100644
index 000000000..a9d4f3f3b
--- /dev/null
+++ b/nova/tests/test_notifications.py
@@ -0,0 +1,169 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for common notifications."""
+
+import copy
+
+from nova.compute import instance_types
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import flags
+import nova.image.fake
+from nova import log as logging
+from nova import notifications
+from nova.notifier import test_notifier
+from nova import test
+from nova.tests import fake_network
+
+LOG = logging.getLogger(__name__)
+FLAGS = flags.FLAGS
+flags.DECLARE('stub_network', 'nova.compute.manager')
+
+
+class NotificationsTestCase(test.TestCase):
+
+    def setUp(self):
+
+        def fake_get_nw_info(cls, ctxt, instance):
+            self.assertTrue(ctxt.is_admin)
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                    spectacular=True)
+
+        super(NotificationsTestCase, self).setUp()
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                fake_get_nw_info)
+
+        self.flags(connection_type='fake',
+                   stub_network=True,
+                   notification_driver='nova.notifier.test_notifier',
+                   network_manager='nova.network.manager.FlatManager',
+                   notify_on_state_change="vm_and_task_state")
+
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+        self.context = context.RequestContext(self.user_id, self.project_id)
+        test_notifier.NOTIFICATIONS = []
+
+        self.instance = self._wrapped_create()
+
+    def _wrapped_create(self, params=None):
+        inst = {}
+        inst['image_ref'] = 1
+        inst['user_id'] = self.user_id
+        inst['project_id'] = self.project_id
+        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+        inst['instance_type_id'] = type_id
+        inst['root_gb'] = 0
+        inst['ephemeral_gb'] = 0
+        if params:
+            inst.update(params)
+        return db.instance_create(self.context, inst)
+
+    def test_notif_disabled(self):
+
+        # test config disable of the notifications
+        self.flags(notify_on_state_change=None)
+
+        old = copy.copy(self.instance)
+        self.instance["vm_state"] = vm_states.ACTIVE
+
+        notifications.send_update(self.context, old, self.instance)
+        self.assertEquals(0, len(test_notifier.NOTIFICATIONS))
+
+    def test_task_notif(self):
+
+        # test config disable of just the task state notifications
+        self.flags(notify_on_state_change="vm_state")
+
+        # we should not get a notification on task state change now
+        old = copy.copy(self.instance)
+        self.instance["task_state"] = task_states.SPAWNING
+
+        notifications.send_update(self.context, old, self.instance)
+        self.assertEquals(0, len(test_notifier.NOTIFICATIONS))
+
+        # ok now enable task state notifications and re-try
+        self.flags(notify_on_state_change="vm_and_task_state")
+
+        notifications.send_update(self.context, old, self.instance)
+        self.assertEquals(1, len(test_notifier.NOTIFICATIONS))
+
+    def test_send_no_notif(self):
+
+        # send an update with no change in vm or task state:
+        notifications.send_update(self.context, self.instance, self.instance)
+        self.assertEquals(0, len(test_notifier.NOTIFICATIONS))
+
+    def test_send_on_vm_change(self):
+
+        # pretend we just transitioned to ACTIVE:
+        params = {"vm_state": vm_states.ACTIVE}
+        (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
+                self.instance["id"], params)
+        notifications.send_update(self.context, old_ref, new_ref)
+
+        self.assertEquals(1, len(test_notifier.NOTIFICATIONS))
+
+    def test_send_on_task_change(self):
+
+        # pretend we just transitioned to task SPAWNING:
+        params = {"task_state": task_states.SPAWNING}
+        (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
+                self.instance["id"], params)
+        notifications.send_update(self.context, old_ref, new_ref)
+
+        self.assertEquals(1, len(test_notifier.NOTIFICATIONS))
+
+    def test_no_update_with_states(self):
+
+        notifications.send_update_with_states(self.context, self.instance,
+                vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+                task_states.SPAWNING)
+        self.assertEquals(0, len(test_notifier.NOTIFICATIONS))
+
+    def test_vm_update_with_states(self):
+
+        notifications.send_update_with_states(self.context, self.instance,
+                vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING,
+                task_states.SPAWNING)
+        self.assertEquals(1, len(test_notifier.NOTIFICATIONS))
+        notif = test_notifier.NOTIFICATIONS[0]
+        payload = notif["payload"]
+
+        self.assertEquals(vm_states.BUILDING, payload["old_state"])
+        self.assertEquals(vm_states.ACTIVE, payload["state"])
+        self.assertEquals(task_states.SPAWNING, payload["old_task_state"])
+        self.assertEquals(task_states.SPAWNING, payload["new_task_state"])
+
+    def test_task_update_with_states(self):
+
+        notifications.send_update_with_states(self.context, self.instance,
+                vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+                None)
+        self.assertEquals(1, len(test_notifier.NOTIFICATIONS))
+        notif = test_notifier.NOTIFICATIONS[0]
+        payload = notif["payload"]
+
+        self.assertEquals(vm_states.BUILDING, payload["old_state"])
+        self.assertEquals(vm_states.BUILDING, payload["state"])
+        self.assertEquals(task_states.SPAWNING, payload["old_task_state"])
+        self.assertEquals(None, payload["new_task_state"])
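The assertions above exercise the state fields; for reference, a compute.instance.update payload carries the usage_from_instance fields plus the state, audit, bandwidth, and image-metadata extras added by _send_instance_update_notification. The field names below come from this commit; all values are illustrative:

    payload = {
        'tenant_id': 'fake', 'user_id': 'fake',
        'instance_id': 'aaaa-bbbb-cccc', 'instance_type': 'm1.tiny',
        'instance_type_id': 2, 'memory_mb': 512, 'disk_gb': 0,
        'display_name': 'test', 'created_at': '2012-01-01 00:00:00',
        'deleted_at': '', 'launched_at': '',
        'state': 'active', 'state_description': '',
        'old_state': 'building', 'old_task_state': 'spawning',
        'new_task_state': None,
        'audit_period_beginning': '2012-01-01 00:00:00',
        'audit_period_ending': '2012-01-01 00:05:00',
        'bandwidth': {'public': {'bw_in': 123456, 'bw_out': 654321}},
        'image_meta': {'min_ram': '512'},
    }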
diff --git a/nova/virt/baremetal/proxy.py b/nova/virt/baremetal/proxy.py
index 90011f28b..ba8296127 100644
--- a/nova/virt/baremetal/proxy.py
+++ b/nova/virt/baremetal/proxy.py
@@ -42,6 +42,7 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova import notifications
 from nova.openstack.common import cfg
 from nova import utils
 from nova.virt.baremetal import dom
@@ -253,14 +254,20 @@ class ProxyConnection(driver.ComputeDriver):
             try:
                 LOG.debug(_("Key is injected but instance is not running yet"),
                           instance=instance)
-                db.instance_update(context, instance['id'],
-                        {'vm_state': vm_states.BUILDING})
+                (old_ref, new_ref) = db.instance_update_and_get_original(
+                        context, instance['id'],
+                        {'vm_state': vm_states.BUILDING})
+                notifications.send_update(context, old_ref, new_ref)
+
                 state = self._conn.create_domain(xml_dict, bpath)
                 if state == power_state.RUNNING:
                     LOG.debug(_('instance %s: booted'), instance['name'],
                               instance=instance)
-                    db.instance_update(context, instance['id'],
+                    (old_ref, new_ref) = db.instance_update_and_get_original(
+                            context, instance['id'],
                             {'vm_state': vm_states.ACTIVE})
+                    notifications.send_update(context, old_ref, new_ref)
+
                     LOG.debug(_('~~~~~~ current state = %s ~~~~~~'), state,
                               instance=instance)
                     LOG.debug(_("instance %s spawned successfully"),
@@ -271,9 +278,12 @@ class ProxyConnection(driver.ComputeDriver):
             except Exception as Exn:
                 LOG.debug(_("Bremetal assignment is overcommitted."),
                           instance=instance)
-                db.instance_update(context, instance['id'],
-                        {'vm_state': vm_states.OVERCOMMIT,
-                         'power_state': power_state.SUSPENDED})
+                (old_ref, new_ref) = db.instance_update_and_get_original(
+                        context, instance['id'],
+                        {'vm_state': vm_states.OVERCOMMIT,
+                         'power_state': power_state.SUSPENDED})
+                notifications.send_update(context, old_ref, new_ref)
+
             timer.stop()
             timer.f = _wait_for_boot
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index cceca0da0..d5f568360 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -26,6 +26,7 @@ from nova.compute import vm_states
 from nova import context
 from nova import db
 from nova import exception
+from nova import notifications
 from nova.virt.xenapi import vm_utils

 LOG = logging.getLogger(__name__)
@@ -74,21 +75,34 @@ class Host(object):
                     vm_counter = vm_counter + 1

                     dest = _host_find(ctxt, self._session, host, host_ref)
-                    db.instance_update(ctxt, instance.id,
-                                       {'host': dest,
-                                        'vm_state': vm_states.MIGRATING})
+                    (old_ref, new_ref) = \
+                            db.instance_update_and_get_original(ctxt,
+                                    instance.id,
+                                    {'host': dest,
+                                     'vm_state': vm_states.MIGRATING})
+                    notifications.send_update(ctxt, old_ref, new_ref)
+
                     self._session.call_xenapi('VM.pool_migrate',
                                               vm_ref, host_ref, {})
                     migrations_counter = migrations_counter + 1
-                    db.instance_update(ctxt, instance.id,
-                                       {'vm_state': vm_states.ACTIVE})
+
+                    (old_ref, new_ref) = \
+                            db.instance_update_and_get_original(ctxt,
+                                    instance.id,
+                                    {'vm_state': vm_states.ACTIVE})
+                    notifications.send_update(ctxt, old_ref, new_ref)
+
                     break
                 except self.XenAPI.Failure:
                     LOG.exception('Unable to migrate VM %(vm_ref)s'
                                   'from %(host)s' % locals())
-                    db.instance_update(ctxt, instance.id,
-                                       {'host': host,
-                                        'vm_state': vm_states.ACTIVE})
+                    (old_ref, new_ref) = \
+                            db.instance_update_and_get_original(ctxt,
+                                    instance.id,
+                                    {'host': host,
+                                     'vm_state': vm_states.ACTIVE})
+                    notifications.send_update(ctxt, old_ref, new_ref)
+
         if vm_counter == migrations_counter:
             return 'on_maintenance'
         else:
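Taken together, the feature is opt-in: nothing fires until notify_on_state_change is set. Enabled the way the new tests do in setUp (self.flags is the TestCase helper; the same key can be set in nova's configuration), each qualifying transition then emits one INFO-level compute.instance.update event through the configured notifier driver:

    # adapted from test_notifications.py above
    self.flags(notify_on_state_change="vm_and_task_state")
    old = copy.copy(self.instance)
    self.instance["task_state"] = task_states.SPAWNING
    notifications.send_update(self.context, old, self.instance)
    # -> one 'compute.instance.update' in test_notifier.NOTIFICATIONS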